// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"
#include "accessors.h"

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->root_key.objectid.  This ensures that all special purpose
 * roots have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#if BTRFS_MAX_LEVEL != 8
#error
#endif

#define DEFINE_LEVEL(stem, level) \
	.names[level] = "btrfs-" stem "-0" #level,

#define DEFINE_NAME(stem) \
	DEFINE_LEVEL(stem, 0) \
	DEFINE_LEVEL(stem, 1) \
	DEFINE_LEVEL(stem, 2) \
	DEFINE_LEVEL(stem, 3) \
	DEFINE_LEVEL(stem, 4) \
	DEFINE_LEVEL(stem, 5) \
	DEFINE_LEVEL(stem, 6) \
	DEFINE_LEVEL(stem, 7)

static struct btrfs_lockdep_keyset {
	u64 id;				/* root objectid */
	/* Longest entry: btrfs-free-space-00 */
	char names[BTRFS_MAX_LEVEL][20];
	struct lock_class_key keys[BTRFS_MAX_LEVEL];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	DEFINE_NAME("root")	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	DEFINE_NAME("extent")	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	DEFINE_NAME("chunk")	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	DEFINE_NAME("dev")	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	DEFINE_NAME("csum")	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	DEFINE_NAME("quota")	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	DEFINE_NAME("log")	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	DEFINE_NAME("treloc")	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	DEFINE_NAME("dreloc")	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	DEFINE_NAME("uuid")	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	DEFINE_NAME("free-space") },
	{ .id = 0,				DEFINE_NAME("tree")	},
};

#undef DEFINE_LEVEL
#undef DEFINE_NAME

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* Find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock, &ks->keys[level], ks->names[level]);
}
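
/*
 * Usage sketch (illustrative only, not additional kernel code): a caller
 * that has just created or read in a tree block is expected to pick the
 * class from the owning root's objectid and the block's level before the
 * eb->lock is first taken, e.g. the way btrfs_init_new_buffer() does:
 *
 *	eb = btrfs_find_create_tree_block(...);
 *	btrfs_set_buffer_lockdep_class(root->root_key.objectid, eb, level);
 *	btrfs_tree_lock(eb);
 *
 * The objectid selects a row of btrfs_lockdep_keysets[] (falling back to
 * the default "tree" entry) and the level selects the key within that row.
 */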

void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb)
{
	if (test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
		btrfs_set_buffer_lockdep_class(root->root_key.objectid,
					       eb, btrfs_header_level(eb));
}

#endif

/*
 * Extent buffer locking
 * =====================
 *
 * We use a rw_semaphore for tree locking, and the semantics are exactly the
 * same:
 *
 * - reader/writer exclusion
 * - writer/writer exclusion
 * - reader/reader sharing
 * - try-lock semantics for readers and writers
 *
 * The rwsem implementation does opportunistic spinning, which reduces the
 * number of times the locking task needs to sleep.
 */

/*
 * __btrfs_tree_read_lock - lock extent buffer for read
 * @eb:   the eb to be locked
 * @nest: the nesting level to be used for lockdep
 *
 * This takes the read lock on the extent buffer, using the specified nesting
 * level for lockdep purposes.
 */
void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_read_lock_enabled())
		start_ns = ktime_get_ns();

	down_read_nested(&eb->lock, nest);
	trace_btrfs_tree_read_lock(eb, start_ns);
}

void btrfs_tree_read_lock(struct extent_buffer *eb)
{
	__btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL);
}

/*
 * Try-lock for read.
 *
 * Return 1 if the rwsem has been taken, 0 otherwise.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (down_read_trylock(&eb->lock)) {
		trace_btrfs_try_tree_read_lock(eb);
		return 1;
	}
	return 0;
}

/*
 * Try-lock for write.
 *
 * Return 1 if the rwsem has been taken, 0 otherwise.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (down_write_trylock(&eb->lock)) {
		eb->lock_owner = current->pid;
		trace_btrfs_try_tree_write_lock(eb);
		return 1;
	}
	return 0;
}

/*
 * Release the read lock.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock(eb);
	up_read(&eb->lock);
}

/*
 * __btrfs_tree_lock - lock eb for write
 * @eb:   the eb to lock
 * @nest: the nesting to use for the lock
 *
 * Returns with the eb->lock write locked.
 */
void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
	__acquires(&eb->lock)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_lock_enabled())
		start_ns = ktime_get_ns();

	down_write_nested(&eb->lock, nest);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_lock(eb, start_ns);
}

void btrfs_tree_lock(struct extent_buffer *eb)
{
	__btrfs_tree_lock(eb, BTRFS_NESTING_NORMAL);
}

/*
 * Release the write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_unlock(eb);
	eb->lock_owner = 0;
	up_write(&eb->lock);
}
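
/*
 * Usage sketch (illustrative only): when a node and its parent must be held
 * at the same time, the child is taken with a non-default nesting level so
 * lockdep accepts two locks of the same class being held at once.  The
 * parent is always locked first, which is what keeps this deadlock free:
 *
 *	__btrfs_tree_lock(parent, BTRFS_NESTING_NORMAL);
 *	child = btrfs_read_node_slot(parent, slot);
 *	__btrfs_tree_lock(child, BTRFS_NESTING_COW);
 *	...
 *	btrfs_tree_unlock(child);
 *	btrfs_tree_unlock(parent);
 */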

/*
 * This releases any locks held in the path starting at level and going all
 * the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few corner
 * cases, such as COW of the block at slot zero in the node.  This ignores
 * those rules, and it should only be called when there are no more updates
 * to be done higher up in the tree.
 */
void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}

/*
 * Loop around taking references on and locking the root node of the tree
 * until we end up with a lock on the root node.  The root may be replaced
 * between looking it up and locking it, so retry until the buffer we locked
 * is still the current root.
 *
 * Return: root extent buffer with write lock held
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);

		btrfs_maybe_reset_lockdep_class(root, eb);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/*
 * Loop around taking references on and locking the root node of the tree
 * until we end up with a lock on the root node.
 *
 * Return: root extent buffer with read lock held
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);

		btrfs_maybe_reset_lockdep_class(root, eb);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/*
 * Loop around taking references on and locking the root node of the tree in
 * nowait mode until we end up with a lock on the root node, or return early
 * to avoid blocking.
 *
 * Return: root extent buffer with read lock held or -EAGAIN.
 */
struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		if (!btrfs_try_tree_read_lock(eb)) {
			free_extent_buffer(eb);
			return ERR_PTR(-EAGAIN);
		}
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
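
/*
 * Usage sketch (illustrative only, loosely modeled on btrfs_search_slot()):
 * a tree walk pins and locks the current root, records the lock in the
 * path, and later drops the no longer needed upper levels in one go:
 *
 *	b = btrfs_read_lock_root_node(root);
 *	level = btrfs_header_level(b);
 *	path->nodes[level] = b;
 *	path->locks[level] = BTRFS_READ_LOCK;
 *	...
 *	btrfs_unlock_up_safe(path, level + 1);
 */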

/*
 * DREW locks
 * ==========
 *
 * DREW stands for double-reader-writer-exclusion lock.  It's used in
 * situations where you want to provide A-B exclusion but not A-A or B-B
 * exclusion.
 *
 * The current implementation gives more priority to readers.  If a reader
 * and a writer both race to acquire their respective sides of the lock, the
 * writer yields its lock as soon as it detects a concurrent reader.
 * Additionally, if there are pending readers, no new writers are allowed to
 * come in and acquire the lock.
 */

int btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
{
	int ret;

	ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL);
	if (ret)
		return ret;

	atomic_set(&lock->readers, 0);
	init_waitqueue_head(&lock->pending_readers);
	init_waitqueue_head(&lock->pending_writers);

	return 0;
}

void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock)
{
	percpu_counter_destroy(&lock->writers);
}

/* Return true if acquisition is successful, false otherwise */
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
{
	if (atomic_read(&lock->readers))
		return false;

	percpu_counter_inc(&lock->writers);

	/* Ensure writers count is updated before we check for pending readers */
	smp_mb();
	if (atomic_read(&lock->readers)) {
		btrfs_drew_write_unlock(lock);
		return false;
	}

	return true;
}

void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
{
	while (true) {
		if (btrfs_drew_try_write_lock(lock))
			return;
		wait_event(lock->pending_writers, !atomic_read(&lock->readers));
	}
}

void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
{
	percpu_counter_dec(&lock->writers);
	cond_wake_up(&lock->pending_readers);
}

void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
{
	atomic_inc(&lock->readers);

	/*
	 * Ensure the pending reader count is perceived BEFORE this reader
	 * goes to sleep in case of active writers.  This guarantees new
	 * writers won't be allowed and that the current reader will be woken
	 * up when the last active writer finishes its jobs.
	 */
	smp_mb__after_atomic();

	wait_event(lock->pending_readers,
		   percpu_counter_sum(&lock->writers) == 0);
}

void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
{
	/*
	 * atomic_dec_and_test() implies a full barrier, so woken up writers
	 * are guaranteed to see the decrement.
	 */
	if (atomic_dec_and_test(&lock->readers))
		wake_up(&lock->pending_writers);
}
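
/*
 * Usage sketch (illustrative only): give one class of tasks the write side
 * and the other class the read side to get A-B exclusion without A-A or
 * B-B exclusion.  In btrfs this is the pattern behind root->snapshot_lock,
 * where NOCOW writers take the write side and snapshot creation takes the
 * read side:
 *
 *	(class A, e.g. a NOCOW writer; must not overlap with any B)
 *	if (!btrfs_drew_try_write_lock(lock))
 *		return -EAGAIN;
 *	...
 *	btrfs_drew_write_unlock(lock);
 *
 *	(class B, e.g. snapshot creation; must not overlap with any A)
 *	btrfs_drew_read_lock(lock);
 *	...
 *	btrfs_drew_read_unlock(lock);
 *
 * Any number of A tasks or B tasks may run concurrently with each other;
 * only the A-B combination is excluded.
 */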