/*
 * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Shared/exclusive locks.  This implementation ensures deterministic lock
 * granting behavior, so that slocks and xlocks are interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so it should not be relied upon in combination with sx locks.
 */

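/*
 * Example usage (an illustrative sketch only; "example_lock" and
 * "example_data" are hypothetical names, not part of this file):
 *
 *	struct sx example_lock;
 *	int example_data;
 *
 *	sx_init(&example_lock, "example");
 *
 *	sx_slock(&example_lock);	(any number of readers at once)
 *	(void)example_data;
 *	sx_sunlock(&example_lock);
 *
 *	sx_xlock(&example_lock);	(one writer, excluding all readers)
 *	example_data++;
 *	sx_xunlock(&example_lock);
 *
 *	sx_destroy(&example_lock);
 *
 * The sx_slock()/sx_xlock() forms above refer to the wrappers in
 * <sys/sx.h>, which supply the caller's file and line to the _sx_*()
 * implementations below.
 */
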
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/condvar.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>

struct lock_class lock_class_sx = {
	"sx",
	LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE
};

void
sx_init(struct sx *sx, const char *description)
{
	struct lock_object *lock;

	bzero(sx, sizeof(*sx));
	lock = &sx->sx_object;
	lock->lo_class = &lock_class_sx;
	lock->lo_name = description;
	lock->lo_flags = LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE;
	mtx_init(&sx->sx_lock, "sx backing lock",
	    MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
	sx->sx_cnt = 0;
	cv_init(&sx->sx_shrd_cv, description);
	sx->sx_shrd_wcnt = 0;
	cv_init(&sx->sx_excl_cv, description);
	sx->sx_excl_wcnt = 0;
	sx->sx_xholder = NULL;

	LOCK_LOG_INIT(lock, 0);

	WITNESS_INIT(lock);
}

void
sx_destroy(struct sx *sx)
{

	LOCK_LOG_DESTROY(&sx->sx_object, 0);

	KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
	    0), ("%s (%s): holders or waiters\n", __FUNCTION__,
	    sx->sx_object.lo_name));

	mtx_destroy(&sx->sx_lock);
	cv_destroy(&sx->sx_shrd_cv);
	cv_destroy(&sx->sx_excl_cv);

	WITNESS_DESTROY(&sx->sx_object);
}

void
_sx_slock(struct sx *sx, const char *file, int line)
{

	mtx_lock(&sx->sx_lock);
	KASSERT(sx->sx_xholder != curproc,
	    ("%s (%s): slock while xlock is held @ %s:%d\n", __FUNCTION__,
	    sx->sx_object.lo_name, file, line));

	/*
	 * Loop in case we lose the race for lock acquisition.
	 */
	while (sx->sx_cnt < 0) {
		sx->sx_shrd_wcnt++;
		cv_wait(&sx->sx_shrd_cv, &sx->sx_lock);
		sx->sx_shrd_wcnt--;
	}

	/* Acquire a shared lock. */
	sx->sx_cnt++;

	LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->sx_object, 0, file, line);

	mtx_unlock(&sx->sx_lock);
}

int
_sx_try_slock(struct sx *sx, const char *file, int line)
{

	mtx_lock(&sx->sx_lock);
	if (sx->sx_cnt >= 0) {
		sx->sx_cnt++;
		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->sx_object, LOP_TRYLOCK, file, line);
		mtx_unlock(&sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(&sx->sx_lock);
		return (0);
	}
}

void
_sx_xlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(&sx->sx_lock);

	/*
	 * With sx locks, we're absolutely not permitted to recurse on
	 * xlocks, as it is fatal (deadlock). Normally, recursion is handled
	 * by WITNESS, but as it is not semantically correct to hold the
	 * xlock while in here, we consider it API abuse and put it under
	 * INVARIANTS.
	 */
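	/*
	 * Purely illustrative sketch ("example_lock" is a hypothetical sx
	 * lock already xlocked by curproc): a second acquisition by the
	 * same process,
	 *
	 *	sx_xlock(&example_lock);
	 *	sx_xlock(&example_lock);	(sleeps on its own lock)
	 *
	 * would wait forever for sx_cnt to return to 0, so we catch it
	 * here instead.
	 */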
	KASSERT(sx->sx_xholder != curproc,
	    ("%s (%s): xlock already held @ %s:%d", __FUNCTION__,
	    sx->sx_object.lo_name, file, line));

	/* Loop in case we lose the race for lock acquisition. */
	while (sx->sx_cnt != 0) {
		sx->sx_excl_wcnt++;
		cv_wait(&sx->sx_excl_cv, &sx->sx_lock);
		sx->sx_excl_wcnt--;
	}

	MPASS(sx->sx_cnt == 0);

	/* Acquire an exclusive lock. */
	sx->sx_cnt--;
	sx->sx_xholder = curproc;

	LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);

	mtx_unlock(&sx->sx_lock);
}

int
_sx_try_xlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(&sx->sx_lock);
	if (sx->sx_cnt == 0) {
		sx->sx_cnt--;
		sx->sx_xholder = curproc;
		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
		    line);
		mtx_unlock(&sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(&sx->sx_lock);
		return (0);
	}
}

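/*
 * Callers of the try variants above must check the return value: 1 means
 * the lock was acquired, 0 means it was not.  A hypothetical caller sketch
 * (names not part of this file):
 *
 *	if (sx_try_xlock(&example_lock)) {
 *		example_data++;
 *		sx_xunlock(&example_lock);
 *	} else {
 *		(handle contention without sleeping)
 *	}
 */
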
void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(&sx->sx_lock);
	_SX_ASSERT_SLOCKED(sx, file, line);

	WITNESS_UNLOCK(&sx->sx_object, 0, file, line);

	/* Release. */
	sx->sx_cnt--;

	/*
	 * If we just released the last shared lock, wake any waiters up, giving
	 * exclusive lockers precedence.  In order to make sure that exclusive
	 * lockers won't be blocked forever, don't wake shared lock waiters if
	 * there are exclusive lock waiters.
	 */
	if (sx->sx_excl_wcnt > 0) {
		if (sx->sx_cnt == 0)
			cv_signal(&sx->sx_excl_cv);
	} else if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(&sx->sx_lock);
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(&sx->sx_lock);
	_SX_ASSERT_XLOCKED(sx, file, line);
	MPASS(sx->sx_cnt == -1);

	WITNESS_UNLOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);

	/* Release. */
	sx->sx_cnt++;
	sx->sx_xholder = NULL;

	/*
	 * Wake up waiters if there are any.  Give precedence to slock waiters.
	 */
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);
	else if (sx->sx_excl_wcnt > 0)
		cv_signal(&sx->sx_excl_cv);

	LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(&sx->sx_lock);
}