xref: /linux/fs/btrfs/locking.c (revision fd639726bf15fca8ee1a00dce8e0096d0ad9bd18)
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag), this bumps the count of blocking
 * holders and drops the spinlock
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
}

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;

	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		/*
		 * atomic_dec_and_test implies a barrier for waitqueue_active
		 */
		if (atomic_dec_and_test(&eb->blocking_writers) &&
		    waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		/*
		 * atomic_dec_and_test implies a barrier for waitqueue_active
		 */
		if (atomic_dec_and_test(&eb->blocking_readers) &&
		    waitqueue_active(&eb->read_lock_wq))
			wake_up(&eb->read_lock_wq);
	}
}
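
/*
 * Illustrative sketch (editor's example, not a caller in this file;
 * real callers live elsewhere in btrfs): a holder that needs to sleep
 * converts its spinning lock to a blocking one, then converts back
 * once it can spin again:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	... do something that might sleep ...
 *	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
 *	btrfs_tree_unlock(eb);
 */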

/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}
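
/*
 * Illustrative sketch (editor's example, not from this file): the
 * common pairing is
 *
 *	btrfs_tree_read_lock(eb);
 *	... read the extent buffer ...
 *	btrfs_tree_read_unlock(eb);
 *
 * A thread that already write-locked eb may also take a read lock on
 * the same eb; that recursion is recorded in eb->lock_nested above.
 */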

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't.
 * this won't wait for blocking writers.
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}
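
/*
 * Editor's note on the double check above: the first blocking_writers
 * test is an optimization that avoids touching the rwlock at all.
 * The second test is required because a blocking writer holds the
 * logical lock *without* holding eb->lock (it dropped the rwlock in
 * btrfs_set_lock_blocking_rw()), so acquiring the rwlock alone does
 * not prove there is no writer.
 */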

/*
 * returns 1 if we get the read lock and 0 if we don't.
 * this won't wait for blocking writers.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}
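
/*
 * Illustrative sketch (editor's example, not from this file): the
 * trylock variant lets a caller back off instead of spinning, e.g.
 *
 *	if (btrfs_try_tree_read_lock(eb)) {
 *		... read the extent buffer ...
 *		btrfs_tree_read_unlock(eb);
 *	} else {
 *		... back off, or block with btrfs_tree_read_lock() ...
 *	}
 */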

/*
 * returns 1 if we get the write lock and 0 if we don't.
 * this won't wait for blocking writers or readers.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}
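
/*
 * Illustrative sketch (editor's example, not from this file):
 *
 *	if (btrfs_try_tree_write_lock(eb)) {
 *		... modify the extent buffer ...
 *		btrfs_tree_unlock(eb);
 *	} else {
 *		... back off, e.g. retry later with btrfs_tree_lock() ...
 *	}
 */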

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/*
	 * atomic_dec_and_test implies a barrier for waitqueue_active
	 */
	if (atomic_dec_and_test(&eb->blocking_readers) &&
	    waitqueue_active(&eb->read_lock_wq))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}
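
/*
 * Illustrative sketch (editor's example, not from this file): a
 * blocking read lock is taken as a spinning one and then converted,
 * so the full sequence is
 *
 *	btrfs_tree_read_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
 *	... do something that might sleep ...
 *	btrfs_tree_read_unlock_blocking(eb);
 */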

/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}
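
/*
 * Illustrative sketch (editor's example, not from this file): write
 * locks do not nest (hence the WARN_ON on lock_owner above), so the
 * plain pairing is
 *
 *	btrfs_tree_lock(eb);
 *	... modify the extent buffer ...
 *	btrfs_tree_unlock(eb);
 */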

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		/*
		 * Make sure counter is updated before we wake up waiters.
		 */
		smp_mb();
		if (waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}
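
/*
 * Editor's note: when the write lock was converted to blocking, the
 * rwlock was already dropped in btrfs_set_lock_blocking_rw(), so the
 * blocking branch above only decrements the counter and wakes any
 * waiters; only the spinning branch still owns eb->lock and must
 * write_unlock() it.
 */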

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}