/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#ifndef BTRFS_LOCKING_H
#define BTRFS_LOCKING_H

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu_counter.h>
#include "extent_io.h"

struct extent_buffer;
struct btrfs_path;
struct btrfs_root;

#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK 2

/*
 * We are limited in number of subclasses by MAX_LOCKDEP_SUBCLASSES, which at
 * the time of this patch is 8, which is how many we use.  Keep this in mind if
 * you decide you want to add another subclass.
 */
enum btrfs_lock_nesting {
	BTRFS_NESTING_NORMAL,

	/*
	 * When we COW a block we are holding the lock on the original block,
	 * and since our lockdep maps are rootid+level, this confuses lockdep
	 * when we lock the newly allocated COW'd block.  Handle this by having
	 * a subclass for COW'ed blocks so that lockdep doesn't complain.
	 */
	BTRFS_NESTING_COW,

	/*
	 * Oftentimes we need to lock adjacent nodes on the same level while
	 * still holding the lock on the original node we searched to, such as
	 * for searching forward or for split/balance.
	 *
	 * Because of this we need to indicate to lockdep that this is
	 * acceptable by having a different subclass for each of these
	 * operations.
	 */
	BTRFS_NESTING_LEFT,
	BTRFS_NESTING_RIGHT,

	/*
	 * When splitting we will be holding a lock on the left/right node when
	 * we need to cow that node, thus we need a new set of subclasses for
	 * these two operations.
	 */
	BTRFS_NESTING_LEFT_COW,
	BTRFS_NESTING_RIGHT_COW,

	/*
	 * When splitting we may push nodes to the left or right, but still use
	 * the subsequent nodes in our path, keeping our locks on those adjacent
	 * blocks.  Thus when we go to allocate a new split block we've already
	 * used up all of our available subclasses, so this subclass exists to
	 * handle this case where we need to allocate a new split block.
	 */
	BTRFS_NESTING_SPLIT,

	/*
	 * When promoting a new block to a root we need to have a special
	 * subclass so we don't confuse lockdep, as it will appear that we are
	 * locking a higher level node before a lower level one.  Copying also
	 * has this problem as it appears we're locking the same block again
	 * when we make a snapshot of an existing root.
	 */
	BTRFS_NESTING_NEW_ROOT,

	/*
	 * We are limited to MAX_LOCKDEP_SUBCLASSES number of subclasses, so
	 * add this in here and add a static_assert to keep us from going over
	 * the limit.  As of this writing we're limited to 8, and we're
	 * definitely using 8, hence this check to keep us from messing up in
	 * the future.
	 */
	BTRFS_NESTING_MAX,
};

enum btrfs_lockdep_trans_states {
	BTRFS_LOCKDEP_TRANS_COMMIT_PREP,
	BTRFS_LOCKDEP_TRANS_UNBLOCKED,
	BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED,
	BTRFS_LOCKDEP_TRANS_COMPLETED,
};
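/*
 * Illustrative example (not taken from the code base; the allocation helper
 * below is made up): when COWing a block the caller still holds the lock on
 * the original block, so the freshly allocated copy is locked with the
 * BTRFS_NESTING_COW subclass, keeping lockdep from flagging what looks like a
 * recursive acquisition of the same rootid+level class:
 *
 *	btrfs_tree_lock(buf);
 *	cow = alloc_cow_copy_of(buf);			(hypothetical helper)
 *	__btrfs_tree_lock(cow, BTRFS_NESTING_COW);
 *	... modify cow, point the parent at it ...
 *	btrfs_tree_unlock(cow);
 *	btrfs_tree_unlock(buf);
 */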
/*
 * Lockdep annotation for wait events.
 *
 * @owner:  The struct where the lockdep map is defined
 * @lock:   The lockdep map corresponding to a wait event
 *
 * This macro is used to annotate a wait event. In this case a thread acquires
 * the lockdep map as writer (exclusive lock) because it has to block until all
 * the threads that hold the lock as readers signal the condition for the wait
 * event and release their locks.
 */
#define btrfs_might_wait_for_event(owner, lock)					\
	do {									\
		rwsem_acquire(&owner->lock##_map, 0, 0, _THIS_IP_);		\
		rwsem_release(&owner->lock##_map, _THIS_IP_);			\
	} while (0)

/*
 * Protection for the resource/condition of a wait event.
 *
 * @owner:  The struct where the lockdep map is defined
 * @lock:   The lockdep map corresponding to a wait event
 *
 * Many threads can modify the condition for the wait event at the same time
 * and signal the threads that block on the wait event. The threads that modify
 * the condition and do the signaling acquire the lock as readers (shared
 * lock).
 */
#define btrfs_lockdep_acquire(owner, lock)					\
	rwsem_acquire_read(&owner->lock##_map, 0, 0, _THIS_IP_)

/*
 * Used after signaling the condition for a wait event to release the lockdep
 * map held by a reader thread.
 */
#define btrfs_lockdep_release(owner, lock)					\
	rwsem_release(&owner->lock##_map, _THIS_IP_)

/*
 * Macros for the transaction states wait events, similar to the generic wait
 * event macros.
 */
#define btrfs_might_wait_for_state(owner, i)					\
	do {									\
		rwsem_acquire(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_); \
		rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_);	\
	} while (0)

#define btrfs_trans_state_lockdep_acquire(owner, i)				\
	rwsem_acquire_read(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_)

#define btrfs_trans_state_lockdep_release(owner, i)				\
	rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_)

/* Initialization of the lockdep map */
#define btrfs_lockdep_init_map(owner, lock)					\
	do {									\
		static struct lock_class_key lock##_key;			\
		lockdep_init_map(&owner->lock##_map, #lock, &lock##_key, 0);	\
	} while (0)

/* Initialization of the transaction states lockdep maps. */
#define btrfs_state_lockdep_init_map(owner, lock, state)			\
	do {									\
		static struct lock_class_key lock##_key;			\
		lockdep_init_map(&owner->btrfs_state_change_map[state], #lock,	\
				 &lock##_key, 0);				\
	} while (0)
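/*
 * Illustrative usage of the wait event annotations above.  The owner struct
 * and the "num_foo"/"foo_wait" members are made up for this sketch; a real
 * owner embeds a struct lockdep_map member named <lock>_map and sets it up
 * with btrfs_lockdep_init_map().
 *
 * Waiting side (takes the map as writer, so it must wait for all readers):
 *
 *	btrfs_might_wait_for_event(fs_info, num_foo);
 *	wait_event(fs_info->foo_wait, atomic_read(&fs_info->num_foo) == 0);
 *
 * Signaling side (takes the map as reader while changing the condition):
 *
 *	btrfs_lockdep_acquire(fs_info, num_foo);
 *	...
 *	if (atomic_dec_and_test(&fs_info->num_foo))
 *		wake_up(&fs_info->foo_wait);
 *	btrfs_lockdep_release(fs_info, num_foo);
 */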
static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES,
	      "too many lock subclasses defined");

void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_lock(struct extent_buffer *eb);
void btrfs_tree_unlock(struct extent_buffer *eb);

void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_read_lock(struct extent_buffer *eb);
void btrfs_tree_read_unlock(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root);

#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb)
{
	lockdep_assert_held_write(&eb->lock);
}
#else
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb) { }
#endif

void btrfs_unlock_up_safe(struct btrfs_path *path, int level);

static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
{
	if (rw == BTRFS_WRITE_LOCK)
		btrfs_tree_unlock(eb);
	else if (rw == BTRFS_READ_LOCK)
		btrfs_tree_read_unlock(eb);
	else
		BUG();
}

struct btrfs_drew_lock {
	atomic_t readers;
	atomic_t writers;
	wait_queue_head_t pending_writers;
	wait_queue_head_t pending_readers;
};

void btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock);
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level);
void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb);
#else
static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
						  struct extent_buffer *eb, int level)
{
}
static inline void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root,
						   struct extent_buffer *eb)
{
}
#endif

#endif /* BTRFS_LOCKING_H */
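/*
 * Note on btrfs_drew_lock (illustrative sketch, not from the code base): it
 * behaves like a lock that admits any number of concurrent readers or any
 * number of concurrent writers, but never both at once.  A minimal sketch,
 * assuming a hypothetical lock instance:
 *
 *	struct btrfs_drew_lock my_lock;
 *
 *	btrfs_drew_lock_init(&my_lock);
 *
 *	Writer side:
 *		btrfs_drew_write_lock(&my_lock);	(blocks while readers exist)
 *		...
 *		btrfs_drew_write_unlock(&my_lock);
 *
 *	Reader side:
 *		btrfs_drew_read_lock(&my_lock);		(blocks while writers exist)
 *		...
 *		btrfs_drew_read_unlock(&my_lock);
 */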