/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks which locks it wants to take and which locks it
 * currently holds; here are the wrappers for locking/unlocking btree nodes
 * and updating the iterator state.
 */

#include "btree_iter.h"
#include "six.h"

void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);

#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void);
#else
static inline void bch2_assert_btree_nodes_not_locked(void) {}
#endif

void bch2_trans_unlock_noassert(struct btree_trans *);

static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
	return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
}

static inline struct btree_transaction_stats *btree_trans_stats(struct btree_trans *trans)
{
	return trans->fn_idx < ARRAY_SIZE(trans->c->btree_transaction_stats)
		? &trans->c->btree_transaction_stats[trans->fn_idx]
		: NULL;
}

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
	BTREE_NODE_WRITE_LOCKED		= SIX_LOCK_write,
};

static inline int btree_node_locked_type(struct btree_path *path,
					 unsigned level)
{
	return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3);
}
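
/*
 * Example (illustrative): each level occupies two bits of
 * path->nodes_locked, holding the six lock type biased by 1 so that zero
 * means unlocked.  With level 0 read locked and level 1 intent locked:
 *
 *	nodes_locked == (SIX_LOCK_read   + 1) << 0 |
 *			(SIX_LOCK_intent + 1) << 2	== 0b1001
 *
 *	btree_node_locked_type(path, 0) == SIX_LOCK_read
 *	btree_node_locked_type(path, 1) == SIX_LOCK_intent
 *	btree_node_locked_type(path, 2) == BTREE_NODE_UNLOCKED
 */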

static inline bool btree_node_write_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_WRITE_LOCKED;
}

static inline bool btree_node_intent_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
	return btree_node_locked_type(path, level) != BTREE_NODE_UNLOCKED;
}

static inline void mark_btree_node_locked_noreset(struct btree_path *path,
						  unsigned level,
						  enum btree_node_locked_type type)
{
	/* relying on this to avoid a branch */
	BUILD_BUG_ON(SIX_LOCK_read   != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	path->nodes_locked &= ~(3U << (level << 1));
	path->nodes_locked |= (type + 1) << (level << 1);
}

static inline void mark_btree_node_unlocked(struct btree_path *path,
					    unsigned level)
{
	EBUG_ON(btree_node_write_locked(path, level));
	mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
}

static inline void mark_btree_node_locked(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned level,
					  enum btree_node_locked_type type)
{
	mark_btree_node_locked_noreset(path, level, type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	path->l[level].lock_taken_time = local_clock();
#endif
}

static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
	return level < path->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

static inline enum btree_node_locked_type
btree_lock_want(struct btree_path *path, int level)
{
	if (level < path->level)
		return BTREE_NODE_UNLOCKED;
	if (level < path->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == path->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}
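
/*
 * Example (illustrative): with path->level == 1 and path->locks_want == 3,
 * traversal wants
 *
 *	btree_lock_want(path, 0) == BTREE_NODE_UNLOCKED
 *	btree_lock_want(path, 1) == BTREE_NODE_INTENT_LOCKED
 *	btree_lock_want(path, 2) == BTREE_NODE_INTENT_LOCKED
 *	btree_lock_want(path, 3) == BTREE_NODE_UNLOCKED
 *
 * i.e. intent locks on every level an update might modify, nothing below
 * the level the path points at, and a read lock at path->level when
 * locks_want doesn't cover it.
 */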

static inline void btree_trans_lock_hold_time_update(struct btree_trans *trans,
						     struct btree_path *path, unsigned level)
{
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	struct btree_transaction_stats *s = btree_trans_stats(trans);

	if (s)
		__bch2_time_stats_update(&s->lock_hold_times,
					 path->l[level].lock_taken_time,
					 local_clock());
#endif
}

/* unlock: */

static inline void btree_node_unlock(struct btree_trans *trans,
				     struct btree_path *path, unsigned level)
{
	int lock_type = btree_node_locked_type(path, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED) {
		six_unlock_type(&path->l[level].b->c.lock, lock_type);
		btree_trans_lock_hold_time_update(trans, path, level);
	}
	mark_btree_node_unlocked(path, level);
}

static inline int btree_path_lowest_level_locked(struct btree_path *path)
{
	return __ffs(path->nodes_locked) >> 1;
}

static inline int btree_path_highest_level_locked(struct btree_path *path)
{
	return __fls(path->nodes_locked) >> 1;
}
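
/*
 * Example (illustrative): nodes_locked holds two bits per level, so the
 * bit index from __ffs()/__fls() is converted to a level by shifting right
 * by one.  With nodes_locked == 0b1001 (level 0 read locked, level 1
 * intent locked):
 *
 *	__ffs(0b1001) == 0, btree_path_lowest_level_locked(path)  == 0
 *	__fls(0b1001) == 3, btree_path_highest_level_locked(path) == 1
 */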

static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
					    struct btree_path *path)
{
	btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);

	while (path->nodes_locked)
		btree_node_unlock(trans, path, btree_path_lowest_level_locked(path));
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
				     struct btree *b)
{
	struct btree_path *linked;

	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
	EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);

	mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);

	trans_for_each_path_with_node(trans, b, linked)
		linked->l[b->c.level].lock_seq++;

	six_unlock_write(&b->c.lock);
}
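
/*
 * Note: bch2_btree_node_relock() only succeeds while the path's saved
 * lock_seq still matches six_lock_seq(&b->c.lock), and releasing the write
 * lock advances the sequence number; bumping every linked path's saved
 * lock_seq above is what lets those paths relock the node afterwards.
 */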

void bch2_btree_node_unlock_write(struct btree_trans *,
			struct btree_path *, struct btree *);

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p);

/* lock: */

static inline int __btree_node_lock_nopath(struct btree_trans *trans,
					 struct btree_bkey_cached_common *b,
					 enum six_lock_type type,
					 bool lock_may_not_fail,
					 unsigned long ip)
{
	int ret;

	trans->lock_may_not_fail = lock_may_not_fail;
	trans->lock_must_abort	= false;
	trans->locking		= b;

	ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
				 bch2_six_check_for_deadlock, trans, ip);
	WRITE_ONCE(trans->locking, NULL);
	WRITE_ONCE(trans->locking_wait.start_time, 0);
	return ret;
}

static inline int __must_check
btree_node_lock_nopath(struct btree_trans *trans,
		       struct btree_bkey_cached_common *b,
		       enum six_lock_type type,
		       unsigned long ip)
{
	return __btree_node_lock_nopath(trans, b, type, false, ip);
}

static inline void btree_node_lock_nopath_nofail(struct btree_trans *trans,
					 struct btree_bkey_cached_common *b,
					 enum six_lock_type type)
{
	int ret = __btree_node_lock_nopath(trans, b, type, true, _THIS_IP_);

	BUG_ON(ret);
}

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
					     struct btree_bkey_cached_common *b,
					     unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (&path->l[level].b->c == b &&
		    btree_node_locked_type(path, level) >= want) {
			six_lock_increment(&b->lock, (enum six_lock_type) want);
			return true;
		}

	return false;
}

static inline int btree_node_lock(struct btree_trans *trans,
			struct btree_path *path,
			struct btree_bkey_cached_common *b,
			unsigned level,
			enum six_lock_type type,
			unsigned long ip)
{
	int ret = 0;

	EBUG_ON(level >= BTREE_MAX_DEPTH);
	EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));

	if (likely(six_trylock_type(&b->lock, type)) ||
	    btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
	    !(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
		path->l[b->level].lock_taken_time = local_clock();
#endif
	}

	return ret;
}
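
/*
 * Sketch of a typical caller (illustrative, not a verbatim excerpt): on
 * failure, ret is a -BCH_ERR_transaction_restart_* error from the cycle
 * detector and must be returned up the stack to where the transaction can
 * restart; on success the caller still updates the path's lock state:
 *
 *	ret = btree_node_lock(trans, path, &b->c, level, SIX_LOCK_read, ip);
 *	if (ret)
 *		return ret;
 *	mark_btree_node_locked(trans, path, level, BTREE_NODE_READ_LOCKED);
 */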

int __bch2_btree_node_lock_write(struct btree_trans *, struct btree_path *,
				 struct btree_bkey_cached_common *b, bool);

static inline int __btree_node_lock_write(struct btree_trans *trans,
					  struct btree_path *path,
					  struct btree_bkey_cached_common *b,
					  bool lock_may_not_fail)
{
	EBUG_ON(&path->l[b->level].b->c != b);
	EBUG_ON(path->l[b->level].lock_seq != six_lock_seq(&b->lock));
	EBUG_ON(!btree_node_intent_locked(path, b->level));

	/*
	 * six locks are unfair, and read locks block while a thread wants a
	 * write lock: thus, we need to tell the cycle detector we have a write
	 * lock _before_ taking the lock:
	 */
	mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);

	return likely(six_trylock_write(&b->lock))
		? 0
		: __bch2_btree_node_lock_write(trans, path, b, lock_may_not_fail);
}

static inline int __must_check
bch2_btree_node_lock_write(struct btree_trans *trans,
			   struct btree_path *path,
			   struct btree_bkey_cached_common *b)
{
	return __btree_node_lock_write(trans, path, b, false);
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *,
				       struct btree_path *,
				       struct btree_bkey_cached_common *);

/* relock: */

bool bch2_btree_path_relock_norestart(struct btree_trans *,
				      struct btree_path *, unsigned long);
int __bch2_btree_path_relock(struct btree_trans *,
			     struct btree_path *, unsigned long);

static inline int bch2_btree_path_relock(struct btree_trans *trans,
				struct btree_path *path, unsigned long trace_ip)
{
	return btree_node_locked(path, path->level)
		? 0
		: __bch2_btree_path_relock(trans, path, trace_ip);
}

bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);

static inline bool bch2_btree_node_relock(struct btree_trans *trans,
					  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, true));
}

static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
						  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, false));
}

/* upgrade: */

struct get_locks_fail {
	unsigned	l;
	struct btree	*b;
};

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
			       struct btree_path *, unsigned,
			       struct get_locks_fail *);

bool __bch2_btree_path_upgrade(struct btree_trans *,
			       struct btree_path *, unsigned,
			       struct get_locks_fail *);

static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned new_locks_want)
{
	struct get_locks_fail f;
	unsigned old_locks_want = path->locks_want;

	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

	if (path->locks_want < new_locks_want
	    ? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
	    : path->uptodate == BTREE_ITER_UPTODATE)
		return 0;

	trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
			old_locks_want, new_locks_want, &f);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
}
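
/*
 * Sketch of a typical caller (illustrative): upgrading is how a path
 * traversed with read locks acquires the intent locks an update needs; on
 * failure the transaction has already been flagged for restart, and the
 * error must be propagated up to the restart point:
 *
 *	ret = bch2_btree_path_upgrade(trans, path, new_locks_want);
 *	if (ret)
 *		return ret;
 */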

/* misc: */

static inline void btree_path_set_should_be_locked(struct btree_path *path)
{
	EBUG_ON(!btree_node_locked(path, path->level));
	EBUG_ON(path->uptodate);

	path->should_be_locked = true;
}

static inline void __btree_path_set_level_up(struct btree_trans *trans,
				      struct btree_path *path,
				      unsigned l)
{
	btree_node_unlock(trans, path, l);
	path->l[l].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
}

static inline void btree_path_set_level_up(struct btree_trans *trans,
				    struct btree_path *path)
{
	__btree_path_set_level_up(trans, path, path->level++);
	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
}

/* debug: */

struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
				struct btree_path *,
				struct btree_bkey_cached_common *b,
				unsigned);

int bch2_check_for_deadlock(struct btree_trans *, struct printbuf *);

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_path_verify_locks(struct btree_path *);
void bch2_trans_verify_locks(struct btree_trans *);
#else
static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
static inline void bch2_trans_verify_locks(struct btree_trans *trans) {}
#endif

#endif /* _BCACHEFS_BTREE_LOCKING_H */