xref: /freebsd/sys/kern/kern_umtx.c (revision 77b7cdf1999ee965ad494fddd184b18f532ac91a)
/*
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/thr.h>
#include <sys/umtx.h>
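
/*
 * A struct umtx is a lock word that lives in user memory.  u_owner holds
 * either UMTX_UNOWNED or the struct thread pointer of the owning thread,
 * possibly with the UMTX_CONTESTED bit set.  User space is expected to
 * handle the uncontested acquire and release itself with a compare and
 * swap on u_owner, entering the kernel via _umtx_lock() and _umtx_unlock()
 * only when that fails or when the contested bit is set.  While contested,
 * u_blocked names the first blocked thread and the remaining waiters are
 * chained through that thread's td_umtxq list.  Both system calls
 * serialize on the proc lock.
 */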
int
_umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
    /* struct umtx *umtx */
{
	struct umtx *umtx;
	struct thread *blocked;
	intptr_t owner;
	intptr_t old;
	int error;

	error = 0;

	/*
	 * Care must be exercised when dealing with this structure.  It
	 * can fault on any access.
	 */
	umtx = uap->umtx;

	PROC_LOCK(td->td_proc);

	for (;;) {
		/*
		 * Try the uncontested case.  This should be done in userland.
		 */
		owner = casuptr((intptr_t *)&umtx->u_owner,
		    UMTX_UNOWNED, (intptr_t)td);

		/* The acquire succeeded. */
		if (owner == UMTX_UNOWNED) {
			error = 0;
			goto out;
		}

		/* The address was invalid. */
		if (owner == -1) {
			error = EFAULT;
			goto out;
		}

		if (owner & UMTX_CONTESTED)
			break;

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		old = casuptr((intptr_t *)&umtx->u_owner, owner,
		    owner | UMTX_CONTESTED);

		/* We set the contested bit. */
		if (old == owner)
			break;

		/* The address was invalid. */
		if (old == -1) {
			error = EFAULT;
			goto out;
		}
		/* We didn't set the contested bit, try again. */
	}

	/*
	 * We are now protected from further races via the proc lock.
	 * If userland messes with their mutex without using cmpset
	 * they will deadlock themselves but they will still be
	 * killable via signals.
	 */

	if ((owner = fuword(&umtx->u_blocked)) == -1) {
		error = EFAULT;
		goto out;
	}

	if (owner == UMTX_UNOWNED) {
		if (suword(&umtx->u_blocked, (long)td) == -1) {
			error = EFAULT;
			goto out;
		}
		/*
		 * Other blocked threads will reside here.
		 */
		STAILQ_INIT(&td->td_umtxq);
	} else {
		FOREACH_THREAD_IN_PROC(td->td_proc, blocked)
			if (blocked == (struct thread *)(owner))
				break;

		if (blocked == NULL) {
			error = EINVAL;
			goto out;
		}
		/*
		 * Insert us onto the end of the STAILQ.
		 */
		STAILQ_INSERT_TAIL(&blocked->td_umtxq, td, td_umtx);
	}

	for (;;) {
		/*
		 * Sleep until we can acquire the lock.  We must still deliver
		 * signals so that they are not deferred until we acquire the
		 * lock, which may be never.  The thread's actual priority is
		 * used to maintain proper ordering.
		 */

		error = msleep(&td->td_umtx, &td->td_proc->p_mtx,
		    td->td_priority | PCATCH, "umtx", 0);

		/*
		 * When we are woken up we need to see if we now own the lock
		 * even if a signal was delivered.
		 */
		if ((owner = fuword(&umtx->u_owner)) == -1) {
			error = EFAULT;
			break;
		}
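		/*
		 * The unlock path may hand us the lock with the contested
		 * bit still set, so strip it before comparing against our
		 * own thread pointer.
		 */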
		owner &= ~UMTX_CONTESTED;
		if ((struct thread *)owner == td) {
			error = 0;
			break;
		}

		/*
		 * We may have signals to deliver.
		 */
		if (error)
			break;
	}

out:
	PROC_UNLOCK(td->td_proc);

	return (error);
}
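
/*
 * Release a umtx.  The caller must own it; if the lock is uncontested the
 * owner field is simply cleared, otherwise ownership is handed directly to
 * the first thread on the blocked queue and that thread is woken up.
 */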
int
_umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
    /* struct umtx *umtx */
{
	struct thread *td0;
	struct umtx *umtx;
	intptr_t owner;
	intptr_t blocked;
	intptr_t old;
	int error;

	error = 0;
	umtx = uap->umtx;

	PROC_LOCK(td->td_proc);

	/*
	 * Make sure we own this mtx.
	 *
	 * XXX We need a {fu,su}ptr; this is not correct on archs where
	 * sizeof(intptr_t) != sizeof(long).
	 */
	if ((owner = fuword(&umtx->u_owner)) == -1) {
		error = EFAULT;
		goto out;
	}
	if ((struct thread *)(owner & ~UMTX_CONTESTED) != td) {
		error = EPERM;
		goto out;
	}
	/*
	 * If we own it but it isn't contested then we can just release and
	 * return.
	 */
	if ((owner & UMTX_CONTESTED) == 0) {
		owner = casuptr((intptr_t *)&umtx->u_owner,
		    (intptr_t)td, UMTX_UNOWNED);

		if (owner == -1)
			error = EFAULT;
		/*
		 * If this failed someone modified the memory without going
		 * through this api.
		 */
		else if (owner != (intptr_t)td)
			error = EINVAL;
		else
			error = 0;

		goto out;
	}

	/*
	 * Since we own the mutex and the proc lock we are free to inspect
	 * the blocked queue.  It must have one valid entry since the
	 * CONTESTED bit was set.
	 */
	blocked = fuword(&umtx->u_blocked);
	if (blocked == -1) {
		error = EFAULT;
		goto out;
	}
	if (blocked == 0) {
		error = EINVAL;
		goto out;
	}

	FOREACH_THREAD_IN_PROC(td->td_proc, td0)
		if (td0 == (struct thread *)blocked)
			break;

	if (td0 == NULL) {
		error = EINVAL;
		goto out;
	}

	if (!STAILQ_EMPTY(&td0->td_umtxq)) {
		struct thread *next;

		blocked |= UMTX_CONTESTED;
		next = STAILQ_FIRST(&td0->td_umtxq);
		if (suword(&umtx->u_blocked, (long)next) == -1) {
			error = EFAULT;
			goto out;
		}
		STAILQ_REMOVE_HEAD(&td0->td_umtxq, td_umtx);

		/*
		 * Switch the queue over to the next blocked thread.
		 */
		if (!STAILQ_EMPTY(&td0->td_umtxq)) {
			next->td_umtxq = td0->td_umtxq;
			STAILQ_INIT(&td0->td_umtxq);
		} else
			STAILQ_INIT(&next->td_umtxq);
	} else {
		if (suword(&umtx->u_blocked, UMTX_UNOWNED) == -1) {
			error = EFAULT;
			goto out;
		}
	}
	/*
	 * Now directly assign this mutex to the first thread that was
	 * blocked on it.
	 */
	old = casuptr((intptr_t *)&umtx->u_owner, owner, blocked);

	/* The address was invalid. */
	if (old == -1) {
		error = EFAULT;
		goto out;
	}
	/*
	 * This will only happen if someone modifies the lock without going
	 * through this api.
	 */
	if (old != owner) {
		error = EINVAL;
		goto out;
	}
	/* Success. */
	error = 0;
	wakeup(&td0->td_umtx);

out:
	PROC_UNLOCK(td->td_proc);

	return (error);
}