xref: /linux/fs/btrfs/locking.c (revision fd71def6d9abc5ae362fb9995d46049b7b0ed391)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

/*
 * Lockdep class keys for extent_buffer->lock in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes, may nest inside each
 * other, and therefore require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->root_key.objectid.  This ensures that all special purpose
 * roots have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked, thus preventing deadlock.  As lockdep doesn't know this, use
 * a subclass to avoid triggering lockdep warnings in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs updating as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#if BTRFS_MAX_LEVEL != 8
#error "BTRFS_MAX_LEVEL changed, update the lockdep keysets below"
#endif

#define DEFINE_LEVEL(stem, level)					\
	.names[level] = "btrfs-" stem "-0" #level,

#define DEFINE_NAME(stem)						\
	DEFINE_LEVEL(stem, 0)						\
	DEFINE_LEVEL(stem, 1)						\
	DEFINE_LEVEL(stem, 2)						\
	DEFINE_LEVEL(stem, 3)						\
	DEFINE_LEVEL(stem, 4)						\
	DEFINE_LEVEL(stem, 5)						\
	DEFINE_LEVEL(stem, 6)						\
	DEFINE_LEVEL(stem, 7)

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	/* Longest entry: btrfs-block-group-00 */
	char			names[BTRFS_MAX_LEVEL][24];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	DEFINE_NAME("root")	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	DEFINE_NAME("extent")	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	DEFINE_NAME("chunk")	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	DEFINE_NAME("dev")	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	DEFINE_NAME("csum")	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	DEFINE_NAME("quota")	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	DEFINE_NAME("log")	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	DEFINE_NAME("treloc")	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	DEFINE_NAME("dreloc")	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	DEFINE_NAME("uuid")	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	DEFINE_NAME("free-space") },
	{ .id = BTRFS_BLOCK_GROUP_TREE_OBJECTID, DEFINE_NAME("block-group") },
	{ .id = BTRFS_RAID_STRIPE_TREE_OBJECTID, DEFINE_NAME("raid-stripe") },
	{ .id = 0,				DEFINE_NAME("tree")	},
};

#undef DEFINE_LEVEL
#undef DEFINE_NAME

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level)
{
	struct btrfs_lockdep_keyset *ks;

	ASSERT(level < ARRAY_SIZE(ks->keys));

	/* Find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock, &ks->keys[level], ks->names[level]);
}

void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb)
{
	if (test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
		btrfs_set_buffer_lockdep_class(btrfs_root_id(root),
					       eb, btrfs_header_level(eb));
}

#endif

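/*
 * Illustrative sketch (not part of the original file): roughly how the
 * lockdep helpers above are meant to be used when a tree block comes into
 * existence.  For an eb at level 2 of the extent tree this picks the class
 * named "btrfs-extent-02".  The wrapper example_set_lockdep_class() is
 * hypothetical; the real callers are the read end-io path and
 * btrfs_init_new_buffer(), as the comment above notes.
 */
static void __maybe_unused example_set_lockdep_class(struct btrfs_root *root,
						     struct extent_buffer *eb,
						     int level)
{
	/* Pick the keyset by root objectid and the key/name by level. */
	btrfs_set_buffer_lockdep_class(btrfs_root_id(root), eb, level);
}
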
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_set_eb_lock_owner(struct extent_buffer *eb, pid_t owner)
{
	eb->lock_owner = owner;
}
#else
static void btrfs_set_eb_lock_owner(struct extent_buffer *eb, pid_t owner) { }
#endif

/*
 * Extent buffer locking
 * =====================
 *
 * We use a rw_semaphore for tree locking, and the semantics are exactly the
 * same:
 *
 * - reader/writer exclusion
 * - writer/writer exclusion
 * - reader/reader sharing
 * - try-lock semantics for readers and writers
 *
 * The rwsem implementation does opportunistic spinning, which reduces the
 * number of times the locking task needs to sleep.
 */

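/*
 * Illustrative sketch (not part of the original file): the basic read and
 * write lock/unlock pairing built on the rwsem described above.  The actual
 * work on the buffer is elided; only the locking calls are the point here.
 */
static void __maybe_unused example_basic_locking(struct extent_buffer *eb)
{
	/* Shared access: multiple readers may hold the lock concurrently. */
	btrfs_tree_read_lock(eb);
	/* ... read items out of the eb ... */
	btrfs_tree_read_unlock(eb);

	/* Exclusive access: excludes both readers and other writers. */
	btrfs_tree_lock(eb);
	/* ... modify the eb ... */
	btrfs_tree_unlock(eb);
}
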
/*
 * btrfs_tree_read_lock_nested - lock extent buffer for read
 * @eb:		the eb to be locked
 * @nest:	the nesting level to be used for lockdep
 *
 * This takes the read lock on the extent buffer, using the specified nesting
 * level for lockdep purposes.
 */
void btrfs_tree_read_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_read_lock_enabled())
		start_ns = ktime_get_ns();

	down_read_nested(&eb->lock, nest);
	trace_btrfs_tree_read_lock(eb, start_ns);
}

/*
 * Try-lock for read.
 *
 * Return 1 if the read lock has been taken, 0 otherwise.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (down_read_trylock(&eb->lock)) {
		trace_btrfs_try_tree_read_lock(eb);
		return 1;
	}
	return 0;
}

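/*
 * Illustrative sketch (not part of the original file): a typical try-lock
 * pattern.  Attempt the non-blocking read lock first and fall back to the
 * blocking variant only when that fails, e.g. when the caller would rather
 * avoid sleeping while it still holds other resources.
 */
static void __maybe_unused example_try_read_lock(struct extent_buffer *eb)
{
	if (!btrfs_try_tree_read_lock(eb)) {
		/* Somebody holds the write lock, wait for it. */
		btrfs_tree_read_lock(eb);
	}
	/* ... read from the eb ... */
	btrfs_tree_read_unlock(eb);
}
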
/*
 * Release read lock.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock(eb);
	up_read(&eb->lock);
}

/*
 * Lock eb for write.
 *
 * @eb:		the eb to lock
 * @nest:	the nesting to use for the lock
 *
 * Returns with the eb->lock write locked.
 */
void btrfs_tree_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
	__acquires(&eb->lock)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_lock_enabled())
		start_ns = ktime_get_ns();

	down_write_nested(&eb->lock, nest);
	btrfs_set_eb_lock_owner(eb, current->pid);
	trace_btrfs_tree_lock(eb, start_ns);
}

/*
 * Release the write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_unlock(eb);
	btrfs_set_eb_lock_owner(eb, 0);
	up_write(&eb->lock);
}

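/*
 * Illustrative sketch (not part of the original file): how the nesting
 * argument can be used when two buffers of the same lockdep class must be
 * held at once while their parent is already write locked, as described in
 * the lockdep comment at the top of this file.  Passing a dedicated nesting
 * level (here BTRFS_NESTING_COW, assumed to be the COW copy of a child)
 * keeps lockdep from flagging the second acquisition of the same class.
 */
static void __maybe_unused example_nested_locking(struct extent_buffer *parent,
						  struct extent_buffer *child,
						  struct extent_buffer *cow_copy)
{
	btrfs_tree_lock(parent);
	btrfs_tree_lock(child);
	/* Same class as @child, so tell lockdep this nesting is intended. */
	btrfs_tree_lock_nested(cow_copy, BTRFS_NESTING_COW);

	/* ... copy items from child to cow_copy ... */

	btrfs_tree_unlock(cow_copy);
	btrfs_tree_unlock(child);
	btrfs_tree_unlock(parent);
}
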
/*
 * This releases any locks held in the path starting at level and going all the
 * way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few corner
 * cases, such as COW of the block at slot zero in the node.  This ignores
 * those rules, and it should only be called when there are no more updates to
 * be done higher up in the tree.
 */
void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}

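/*
 * Illustrative sketch (not part of the original file): once a caller is done
 * changing anything above the leaf it can drop every lock from level 1
 * upwards and keep only the leaf locked.  The search that populated @path is
 * elided; the point is the level argument marking where the unlock walk
 * starts.
 */
static void __maybe_unused example_unlock_up(struct btrfs_path *path)
{
	/* Keep path->nodes[0] (the leaf) locked, release levels 1..root. */
	btrfs_unlock_up_safe(path, 1);
}
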
/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with write lock held
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);

		btrfs_maybe_reset_lockdep_class(root, eb);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

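/*
 * Illustrative sketch (not part of the original file): the caller of
 * btrfs_lock_root_node() gets back a buffer with both a reference and the
 * write lock held, so it has to release both when it is done.
 */
static void __maybe_unused example_lock_root(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	eb = btrfs_lock_root_node(root);
	/* ... the root cannot change under us here ... */
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);
}
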
/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with read lock held
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);

		btrfs_maybe_reset_lockdep_class(root, eb);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/*
 * Loop around taking references on and locking the root node of the tree in
 * nowait mode, until we either end up with a lock on the root node or give up
 * rather than block.
 *
 * Return: root extent buffer with read lock held, or ERR_PTR(-EAGAIN) if the
 * lock could not be taken without blocking.
 */
struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		if (!btrfs_try_tree_read_lock(eb)) {
			free_extent_buffer(eb);
			return ERR_PTR(-EAGAIN);
		}
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

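/*
 * Illustrative sketch (not part of the original file): a nowait caller has to
 * check for the ERR_PTR(-EAGAIN) result and typically retries later from a
 * context where blocking is allowed.  The return convention of this example
 * (0 on success, negative errno to ask the caller to retry) is only for the
 * sake of the sketch.
 */
static int __maybe_unused example_nowait_read_root(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	eb = btrfs_try_read_lock_root_node(root);
	if (IS_ERR(eb))
		return PTR_ERR(eb);	/* -EAGAIN, retry without nowait */

	/* ... read-only work against the root node ... */

	btrfs_tree_read_unlock(eb);
	free_extent_buffer(eb);
	return 0;
}
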
/*
 * DREW locks
 * ==========
 *
 * DREW stands for double-reader-writer-exclusion lock. It's used in situations
 * where you want to provide A-B exclusion but not AA or BB.
 *
 * The current implementation gives priority to readers. If a reader and a
 * writer both race to acquire their respective sides of the lock, the writer
 * yields as soon as it detects a concurrent reader. Additionally, if there
 * are pending readers, no new writers are allowed to come in and acquire the
 * lock.
 */

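/*
 * Illustrative sketch (not part of the original file): the A-B exclusion the
 * DREW lock provides.  The lock is assumed to have been set up once with
 * btrfs_drew_lock_init().  Operations of the same class may run concurrently;
 * only the two classes exclude each other.
 */
static void __maybe_unused example_drew_usage(struct btrfs_drew_lock *lock)
{
	/* Class A ("reader" side): concurrent with other class A users. */
	btrfs_drew_read_lock(lock);
	/* ... class A work ... */
	btrfs_drew_read_unlock(lock);

	/* Class B ("writer" side): excludes class A, concurrent with class B. */
	btrfs_drew_write_lock(lock);
	/* ... class B work ... */
	btrfs_drew_write_unlock(lock);
}
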
void btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
{
	atomic_set(&lock->readers, 0);
	atomic_set(&lock->writers, 0);
	init_waitqueue_head(&lock->pending_readers);
	init_waitqueue_head(&lock->pending_writers);
}

/* Return true if acquisition is successful, false otherwise */
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
{
	if (atomic_read(&lock->readers))
		return false;

	atomic_inc(&lock->writers);

	/* Ensure the writers count is updated before we check for pending readers */
	smp_mb__after_atomic();
	if (atomic_read(&lock->readers)) {
		btrfs_drew_write_unlock(lock);
		return false;
	}

	return true;
}

void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
{
	while (true) {
		if (btrfs_drew_try_write_lock(lock))
			return;
		wait_event(lock->pending_writers, !atomic_read(&lock->readers));
	}
}

void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
{
	/*
	 * atomic_dec_and_test() implies a full barrier, so woken up readers are
	 * guaranteed to see the decrement.
	 */
	if (atomic_dec_and_test(&lock->writers))
		wake_up(&lock->pending_readers);
}

void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
{
	atomic_inc(&lock->readers);

	/*
	 * Ensure the pending reader count is perceived BEFORE this reader
	 * goes to sleep in case of active writers. This guarantees new writers
	 * won't be allowed and that the current reader will be woken up when
	 * the last active writer finishes its job.
	 */
	smp_mb__after_atomic();

	wait_event(lock->pending_readers, atomic_read(&lock->writers) == 0);
}

void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
{
	/*
	 * atomic_dec_and_test() implies a full barrier, so woken up writers
	 * are guaranteed to see the decrement.
	 */
	if (atomic_dec_and_test(&lock->readers))
		wake_up(&lock->pending_writers);
}
383