/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/systm.h>

#include <vm/uma.h>

struct rl_q_entry {
	TAILQ_ENTRY(rl_q_entry) rl_q_link;
	off_t		rl_q_start, rl_q_end;
	int		rl_q_flags;
};

static uma_zone_t rl_entry_zone;

static void
rangelock_sys_init(void)
{

	rl_entry_zone = uma_zcreate("rl_entry", sizeof(struct rl_q_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}
SYSINIT(vfs, SI_SUB_LOCK, SI_ORDER_ANY, rangelock_sys_init, NULL);

static struct rl_q_entry *
rlqentry_alloc(void)
{

	return (uma_zalloc(rl_entry_zone, M_WAITOK));
}

void
rlqentry_free(struct rl_q_entry *rleq)
{

	uma_zfree(rl_entry_zone, rleq);
}

void
rangelock_init(struct rangelock *lock)
{

	TAILQ_INIT(&lock->rl_waiters);
	lock->rl_currdep = NULL;
}

void
rangelock_destroy(struct rangelock *lock)
{

	KASSERT(TAILQ_EMPTY(&lock->rl_waiters), ("Dangling waiters"));
}

/*
 * Two entries are compatible if their ranges do not overlap, or both
 * entries are for read.
 */
static int
ranges_overlap(const struct rl_q_entry *e1,
    const struct rl_q_entry *e2)
{

	if (e1->rl_q_start < e2->rl_q_end && e1->rl_q_end > e2->rl_q_start)
		return (1);
	return (0);
}
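
/*
 * Illustrative note: as the comparison above implies, ranges are
 * treated as half-open intervals [rl_q_start, rl_q_end).  For example,
 * entries covering [0, 10) and [10, 20) do not overlap, while [0, 10)
 * and [9, 20) do.
 */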

/*
 * Recalculate the lock->rl_currdep after an unlock.
 */
static void
rangelock_calc_block(struct rangelock *lock)
{
	struct rl_q_entry *entry, *nextentry, *entry1;

	for (entry = lock->rl_currdep; entry != NULL; entry = nextentry) {
		nextentry = TAILQ_NEXT(entry, rl_q_link);
		if (entry->rl_q_flags & RL_LOCK_READ) {
			/* Reads must not overlap with granted writes. */
			for (entry1 = TAILQ_FIRST(&lock->rl_waiters);
			    !(entry1->rl_q_flags & RL_LOCK_READ);
			    entry1 = TAILQ_NEXT(entry1, rl_q_link)) {
				if (ranges_overlap(entry, entry1))
					goto out;
			}
		} else {
			/* Write must not overlap with any granted locks. */
			for (entry1 = TAILQ_FIRST(&lock->rl_waiters);
			    entry1 != entry;
			    entry1 = TAILQ_NEXT(entry1, rl_q_link)) {
				if (ranges_overlap(entry, entry1))
					goto out;
			}

			/* Move grantable write locks to the front. */
			TAILQ_REMOVE(&lock->rl_waiters, entry, rl_q_link);
			TAILQ_INSERT_HEAD(&lock->rl_waiters, entry, rl_q_link);
		}

		/* Grant this lock. */
		entry->rl_q_flags |= RL_LOCK_GRANTED;
		wakeup(entry);
	}
out:
	lock->rl_currdep = entry;
}
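
/*
 * Worked example (illustrative only): suppose the waiters queue holds
 * a granted write on [0, 100) at the head, followed by a waiting read
 * on [50, 60) and a waiting read on [200, 300), with rl_currdep
 * pointing at the read on [50, 60).  When the write is unlocked and
 * removed from the queue, the scan above grants and wakes up the read
 * on [50, 60), then the read on [200, 300), and rl_currdep becomes
 * NULL because every waiter was granted.
 */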

static void
rangelock_unlock_locked(struct rangelock *lock, struct rl_q_entry *entry,
    struct mtx *ilk)
{

	MPASS(lock != NULL && entry != NULL && ilk != NULL);
	mtx_assert(ilk, MA_OWNED);
	KASSERT(entry != lock->rl_currdep, ("stuck currdep"));

	TAILQ_REMOVE(&lock->rl_waiters, entry, rl_q_link);
	rangelock_calc_block(lock);
	mtx_unlock(ilk);
	if (curthread->td_rlqe == NULL)
		curthread->td_rlqe = entry;
	else
		rlqentry_free(entry);
}

void
rangelock_unlock(struct rangelock *lock, void *cookie, struct mtx *ilk)
{

	MPASS(lock != NULL && cookie != NULL && ilk != NULL);

	mtx_lock(ilk);
	rangelock_unlock_locked(lock, cookie, ilk);
}

/*
 * Unlock a sub-range of a granted lock.
 */
void *
rangelock_unlock_range(struct rangelock *lock, void *cookie, off_t start,
    off_t end, struct mtx *ilk)
{
	struct rl_q_entry *entry;

	MPASS(lock != NULL && cookie != NULL && ilk != NULL);
	entry = cookie;
	KASSERT(entry->rl_q_flags & RL_LOCK_GRANTED,
	    ("Unlocking non-granted lock"));
	KASSERT(entry->rl_q_start == start, ("wrong start"));
	KASSERT(entry->rl_q_end >= end, ("wrong end"));

	mtx_lock(ilk);
	if (entry->rl_q_end == end) {
		rangelock_unlock_locked(lock, cookie, ilk);
		return (NULL);
	}
	entry->rl_q_end = end;
	rangelock_calc_block(lock);
	mtx_unlock(ilk);
	return (cookie);
}
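
/*
 * Usage sketch (hypothetical caller, for illustration only; "obj",
 * "lock", and "interlock" are not names from this file): a thread
 * holding a write lock on [0, 1024) that no longer needs the tail of
 * the range may trim the held range down to [0, 512):
 *
 *	cookie = rangelock_wlock(&obj->lock, 0, 1024, &obj->interlock);
 *	... work on [0, 1024) ...
 *	cookie = rangelock_unlock_range(&obj->lock, cookie, 0, 512,
 *	    &obj->interlock);
 *	... work on [0, 512) only; [512, 1024) is available to others ...
 *	rangelock_unlock(&obj->lock, cookie, &obj->interlock);
 */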

/*
 * Add the lock request to the rangelock's queue of pending requests.
 * Sleep until the request can be granted.
 */
static void *
rangelock_enqueue(struct rangelock *lock, off_t start, off_t end, int mode,
    struct mtx *ilk)
{
	struct rl_q_entry *entry;
	struct thread *td;

	MPASS(lock != NULL && ilk != NULL);

	td = curthread;
	if (td->td_rlqe != NULL) {
		entry = td->td_rlqe;
		td->td_rlqe = NULL;
	} else
		entry = rlqentry_alloc();
	MPASS(entry != NULL);
	entry->rl_q_flags = mode;
	entry->rl_q_start = start;
	entry->rl_q_end = end;

	mtx_lock(ilk);
	/*
	 * XXXKIB TODO. Check that a thread does not try to enqueue a
	 * lock that is incompatible with another request from the same
	 * thread.
	 */

	TAILQ_INSERT_TAIL(&lock->rl_waiters, entry, rl_q_link);
	if (lock->rl_currdep == NULL)
		lock->rl_currdep = entry;
	rangelock_calc_block(lock);
	while (!(entry->rl_q_flags & RL_LOCK_GRANTED))
		msleep(entry, ilk, 0, "range", 0);
	mtx_unlock(ilk);
	return (entry);
}
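
/*
 * Note on the sleep protocol above: msleep() drops the interlock while
 * the thread sleeps and reacquires it before returning, so the check
 * of RL_LOCK_GRANTED in the loop is always performed with the
 * interlock held.  The interlock is released again before the cookie
 * is returned to the caller.
 */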

void *
rangelock_rlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk)
{

	return (rangelock_enqueue(lock, start, end, RL_LOCK_READ, ilk));
}

void *
rangelock_wlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk)
{

	return (rangelock_enqueue(lock, start, end, RL_LOCK_WRITE, ilk));
}
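
/*
 * Usage sketch (hypothetical consumer, for illustration only; the
 * names below are not part of this interface): a structure embeds a
 * rangelock together with a mutex serving as the interlock, and I/O on
 * a byte range is bracketed by lock and unlock calls.
 *
 *	struct foo {
 *		struct mtx	 f_interlock;
 *		struct rangelock f_rl;
 *	};
 *
 *	void
 *	foo_init(struct foo *fp)
 *	{
 *		mtx_init(&fp->f_interlock, "foo interlock", NULL, MTX_DEF);
 *		rangelock_init(&fp->f_rl);
 *	}
 *
 *	void
 *	foo_read_region(struct foo *fp, off_t start, off_t end)
 *	{
 *		void *cookie;
 *
 *		cookie = rangelock_rlock(&fp->f_rl, start, end,
 *		    &fp->f_interlock);
 *		... read bytes in [start, end) ...
 *		rangelock_unlock(&fp->f_rl, cookie, &fp->f_interlock);
 *	}
 */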