/*
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/sx.h>
#include <sys/thr.h>
#include <sys/umtx.h>

struct umtx_q {
	LIST_ENTRY(umtx_q)	uq_next;	/* Linked list for the hash. */
	TAILQ_HEAD(, thread)	uq_tdq;		/* List of threads blocked here. */
	struct umtx		*uq_umtx;	/* Pointer key component. */
	pid_t			uq_pid;		/* Pid key component. */
};

#define	UMTX_QUEUES	128
#define	UMTX_HASH(pid, umtx)						\
	(((uintptr_t)pid + ((uintptr_t)umtx & ~65535)) % UMTX_QUEUES)

LIST_HEAD(umtx_head, umtx_q);
static struct umtx_head queues[UMTX_QUEUES];
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");

static struct mtx umtx_lock;
MTX_SYSINIT(umtx, &umtx_lock, "umtx", MTX_DEF);

#define	UMTX_LOCK()	mtx_lock(&umtx_lock);
#define	UMTX_UNLOCK()	mtx_unlock(&umtx_lock);

#define	UMTX_CONTESTED	LONG_MIN

static struct umtx_q *umtx_lookup(struct thread *, struct umtx *umtx);
static struct umtx_q *umtx_insert(struct thread *, struct umtx *umtx);

static struct umtx_q *
umtx_lookup(struct thread *td, struct umtx *umtx)
{
	struct umtx_head *head;
	struct umtx_q *uq;
	pid_t pid;

	pid = td->td_proc->p_pid;

	head = &queues[UMTX_HASH(td->td_proc->p_pid, umtx)];

	LIST_FOREACH(uq, head, uq_next) {
		if (uq->uq_pid == pid && uq->uq_umtx == umtx)
			return (uq);
	}

	return (NULL);
}
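
/*
 * Note: umtx_q entries are keyed on the (pid, user address) pair, so the
 * same address in two different processes names two distinct locks.
 * UMTX_HASH() folds the pid together with the umtx address rounded down to
 * a 64KB boundary (the low 16 bits are masked off), so nearby umtxes in one
 * process share a bucket; umtx_lookup() above then searches that bucket for
 * an exact (pid, address) match.
 */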

/*
 * Insert a thread onto the umtx queue.
 */
static struct umtx_q *
umtx_insert(struct thread *td, struct umtx *umtx)
{
	struct umtx_head *head;
	struct umtx_q *uq;
	pid_t pid;

	pid = td->td_proc->p_pid;

	if ((uq = umtx_lookup(td, umtx)) == NULL) {
		struct umtx_q *ins;

		UMTX_UNLOCK();
		ins = malloc(sizeof(*uq), M_UMTX, M_ZERO | M_WAITOK);
		UMTX_LOCK();

		/*
		 * Someone else could have succeeded while we were blocked
		 * waiting on memory.
		 */
		if ((uq = umtx_lookup(td, umtx)) == NULL) {
			head = &queues[UMTX_HASH(pid, umtx)];
			uq = ins;
			uq->uq_pid = pid;
			uq->uq_umtx = umtx;
			LIST_INSERT_HEAD(head, uq, uq_next);
			TAILQ_INIT(&uq->uq_tdq);
		} else
			free(ins, M_UMTX);
	}

	/*
	 * Insert us onto the end of the TAILQ.
	 */
	TAILQ_INSERT_TAIL(&uq->uq_tdq, td, td_umtx);

	return (uq);
}

static void
umtx_remove(struct umtx_q *uq, struct thread *td)
{
	TAILQ_REMOVE(&uq->uq_tdq, td, td_umtx);

	if (TAILQ_EMPTY(&uq->uq_tdq)) {
		LIST_REMOVE(uq, uq_next);
		free(uq, M_UMTX);
	}
}

int
_umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
    /* struct umtx *umtx */
{
	struct umtx_q *uq;
	struct umtx *umtx;
	intptr_t owner;
	intptr_t old;
	int error;

	uq = NULL;

	/*
	 * Care must be exercised when dealing with this structure.  It
	 * can fault on any access.
	 */
	umtx = uap->umtx;

	for (;;) {
		/*
		 * Try the uncontested case.  This should be done in userland.
		 */
		owner = casuptr((intptr_t *)&umtx->u_owner,
		    UMTX_UNOWNED, td->td_tid);

		/* The address was invalid. */
		if (owner == -1)
			return (EFAULT);

		/* The acquire succeeded. */
		if (owner == UMTX_UNOWNED)
			return (0);

		/* If no one owns it but it is contested, try to acquire it. */
		if (owner == UMTX_CONTESTED) {
			owner = casuptr((intptr_t *)&umtx->u_owner,
			    UMTX_CONTESTED, td->td_tid | UMTX_CONTESTED);

			/* The address was invalid. */
			if (owner == -1)
				return (EFAULT);

			if (owner == UMTX_CONTESTED)
				return (0);

			/* If this failed, the lock has changed; restart. */
			continue;
		}

		UMTX_LOCK();
		uq = umtx_insert(td, umtx);
		UMTX_UNLOCK();

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either someone else has acquired the lock or it has been
		 * released.
		 */
		old = casuptr((intptr_t *)&umtx->u_owner, owner,
		    owner | UMTX_CONTESTED);

		/* The address was invalid. */
		if (old == -1) {
			UMTX_LOCK();
			umtx_remove(uq, td);
			UMTX_UNLOCK();
			return (EFAULT);
		}

		/*
		 * If we successfully set the contested bit, sleep.  Otherwise
		 * the lock changed and we need to retry, or we lost a race
		 * with the thread unlocking the umtx.
		 */
		PROC_LOCK(td->td_proc);
		mtx_lock_spin(&sched_lock);
		if (old == owner && (td->td_flags & TDF_UMTXWAKEUP) == 0) {
			mtx_unlock_spin(&sched_lock);
			error = msleep(td, &td->td_proc->p_mtx,
			    td->td_priority | PCATCH, "umtx", 0);
			mtx_lock_spin(&sched_lock);
		} else
			error = 0;
		td->td_flags &= ~TDF_UMTXWAKEUP;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(td->td_proc);

		UMTX_LOCK();
		umtx_remove(uq, td);
		UMTX_UNLOCK();

		/*
		 * If we caught a signal we might have to retry or exit
		 * immediately.
		 */
		if (error)
			return (error);
	}

	return (0);
}
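
/*
 * A note on the sleep/wakeup handshake above: _umtx_unlock() below sets
 * TDF_UMTXWAKEUP on the waiter (under sched_lock, with the waiter's proc
 * lock held) before calling wakeup().  _umtx_lock() checks that flag while
 * holding its own proc lock, and msleep() only releases that lock once the
 * thread is actually asleep.  A wakeup issued between the contested-bit CAS
 * and the msleep() therefore cannot be lost: either the waiter sees the
 * flag and skips the sleep, or the waker cannot set the flag until the
 * waiter is already asleep and eligible for wakeup().
 */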

int
_umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
    /* struct umtx *umtx */
{
	struct thread *blocked;
	struct umtx *umtx;
	struct umtx_q *uq;
	intptr_t owner;
	intptr_t old;

	umtx = uap->umtx;

	/*
	 * Make sure we own this umtx.
	 *
	 * XXX Need a {fu,su}ptr; this is not correct on architectures where
	 * sizeof(intptr_t) != sizeof(long).
	 */
	if ((owner = fuword(&umtx->u_owner)) == -1)
		return (EFAULT);

	if ((owner & ~UMTX_CONTESTED) != td->td_tid)
		return (EPERM);

	/* We should only ever be in here for contested locks. */
	if ((owner & UMTX_CONTESTED) == 0)
		return (EINVAL);

	blocked = NULL;

	/*
	 * When unlocking the umtx, it must be marked as unowned if there is
	 * at most one thread waiting on it.  Otherwise, it must be marked
	 * as contested.
	 */
	UMTX_LOCK();
	uq = umtx_lookup(td, umtx);
	if (uq == NULL ||
	    (uq != NULL && (blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL &&
	    TAILQ_NEXT(blocked, td_umtx) == NULL)) {
		UMTX_UNLOCK();
		old = casuptr((intptr_t *)&umtx->u_owner, owner,
		    UMTX_UNOWNED);
		if (old == -1)
			return (EFAULT);
		if (old != owner)
			return (EINVAL);

		/*
		 * Recheck the umtx queue to make sure another thread
		 * didn't put itself on it after it was unlocked.
		 */
		UMTX_LOCK();
		uq = umtx_lookup(td, umtx);
		if (uq != NULL &&
		    ((blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL &&
		    TAILQ_NEXT(blocked, td_umtx) != NULL)) {
			UMTX_UNLOCK();
			old = casuptr((intptr_t *)&umtx->u_owner,
			    UMTX_UNOWNED, UMTX_CONTESTED);
		} else {
			UMTX_UNLOCK();
		}
	} else {
		UMTX_UNLOCK();
		old = casuptr((intptr_t *)&umtx->u_owner,
		    owner, UMTX_CONTESTED);
		if (old != -1 && old != owner)
			return (EINVAL);
	}

	if (old == -1)
		return (EFAULT);

	/*
	 * If there is a thread waiting on the umtx, wake it up.
	 */
	if (blocked != NULL) {
		PROC_LOCK(blocked->td_proc);
		mtx_lock_spin(&sched_lock);
		blocked->td_flags |= TDF_UMTXWAKEUP;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(blocked->td_proc);
		wakeup(blocked);
	}

	return (0);
}
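
/*
 * For illustration only: a minimal sketch of the userland fast path that the
 * "This should be done in userland" comment in _umtx_lock() refers to.  It
 * assumes the userland-visible struct umtx from <sys/umtx.h> with a single
 * u_owner word, the UMTX_UNOWNED/UMTX_CONTESTED encoding used above, that
 * _umtx_lock()/_umtx_unlock() are reachable from userland as system calls
 * taking a struct umtx *, and that the atomic_cmpset_*_long() primitives
 * from <machine/atomic.h> are usable from userland.  The wrapper names are
 * hypothetical and the block is compiled out.
 */
#if 0
static int
example_umtx_lock(struct umtx *umtx, long tid)
{

	/* Uncontested acquire: UMTX_UNOWNED -> our thread id. */
	if (atomic_cmpset_acq_long((volatile u_long *)&umtx->u_owner,
	    UMTX_UNOWNED, tid))
		return (0);
	/* Contested: let the kernel set the contested bit and queue us. */
	return (_umtx_lock(umtx));
}

static int
example_umtx_unlock(struct umtx *umtx, long tid)
{

	/* Uncontested release: our thread id -> UMTX_UNOWNED. */
	if (atomic_cmpset_rel_long((volatile u_long *)&umtx->u_owner,
	    tid, UMTX_UNOWNED))
		return (0);
	/* The contested bit was set; the kernel must wake a waiter. */
	return (_umtx_unlock(umtx));
}
#endif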