/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "thr_private.h"

/*
 * Prototypes
 */
static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;

static struct pthread_mutex_attr static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t static_mattr = &static_mutex_attr;

/* Single underscore versions provided for libc internal usage: */
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

/* Reinitialize a mutex to defaults. */
int
_mutex_reinit(pthread_mutex_t * mutex)
{
        int ret = 0;

        if (mutex == NULL)
                return (EINVAL);
        if (*mutex == NULL)
                return (pthread_mutex_init(mutex, NULL));

        (*mutex)->m_attr.m_type = PTHREAD_MUTEX_DEFAULT;
        (*mutex)->m_attr.m_protocol = PTHREAD_PRIO_NONE;
        (*mutex)->m_attr.m_ceiling = 0;
        (*mutex)->m_attr.m_flags &= MUTEX_FLAGS_PRIVATE;
        (*mutex)->m_attr.m_flags |= MUTEX_FLAGS_INITED;
        bzero(&(*mutex)->m_mtx, sizeof(struct umtx));
        (*mutex)->m_owner = NULL;
        (*mutex)->m_count = 0;
        (*mutex)->m_refcount = 0;

        return (0);
}

int
_pthread_mutex_init(pthread_mutex_t * mutex,
    const pthread_mutexattr_t * mutex_attr)
{
        enum pthread_mutextype type;
        pthread_mutex_t pmutex;

        if (mutex == NULL)
                return (EINVAL);

        /*
         * Allocate mutex.
         */
        pmutex = (pthread_mutex_t)calloc(1, sizeof(struct pthread_mutex));
        if (pmutex == NULL)
                return (ENOMEM);

        bzero(pmutex, sizeof(*pmutex));

        /* Set mutex attributes. */
        if (mutex_attr == NULL || *mutex_attr == NULL) {
                /* Default to an error-checking POSIX mutex. */
                pmutex->m_attr.m_type = PTHREAD_MUTEX_ERRORCHECK;
                pmutex->m_attr.m_protocol = PTHREAD_PRIO_NONE;
                pmutex->m_attr.m_ceiling = 0;
                pmutex->m_attr.m_flags = 0;
        } else
                bcopy(*mutex_attr, &pmutex->m_attr,
                    sizeof(struct pthread_mutex_attr));

        /*
         * Sanity check mutex type and protocol.
         */
        if ((pmutex->m_attr.m_type < PTHREAD_MUTEX_ERRORCHECK) ||
            (pmutex->m_attr.m_type >= MUTEX_TYPE_MAX) ||
            (pmutex->m_attr.m_protocol < PTHREAD_PRIO_NONE) ||
            (pmutex->m_attr.m_protocol > PTHREAD_PRIO_PROTECT))
                goto err;

        /*
         * Initialize mutex.
         */
        pmutex->m_attr.m_flags |= MUTEX_FLAGS_INITED;
        *mutex = pmutex;

        return (0);
err:
        free(pmutex);
        return (EINVAL);
}

int
_pthread_mutex_destroy(pthread_mutex_t * mutex)
{
        int ret = 0;

        if (mutex == NULL || *mutex == NULL)
                return (EINVAL);

        /* Ensure that the mutex is unlocked. */
        if (((*mutex)->m_owner != NULL) ||
            ((*mutex)->m_refcount != 0))
                return (EBUSY);

        /* Free it. */
        free(*mutex);
        *mutex = NULL;
        return (0);
}

static int
init_static(pthread_mutex_t *mutex)
{
        pthread_t curthread;
        int ret;

        curthread = _get_curthread();
        GIANT_LOCK(curthread);
        if (*mutex == NULL)
                ret = pthread_mutex_init(mutex, NULL);
        else
                ret = 0;
        GIANT_UNLOCK(curthread);
        return (ret);
}

static int
init_static_private(pthread_mutex_t *mutex)
{
        pthread_t curthread;
        int ret;

        curthread = _get_curthread();
        GIANT_LOCK(curthread);
        if (*mutex == NULL)
                ret = pthread_mutex_init(mutex, &static_mattr);
        else
                ret = 0;
        GIANT_UNLOCK(curthread);
        return (ret);
}

static int
mutex_trylock_common(pthread_mutex_t *mutex)
{
        struct pthread *curthread = _get_curthread();
        int error;

        PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
            "Uninitialized mutex in mutex_trylock_common");

        /*
         * Attempt to obtain the lock.
         */
        if ((error = umtx_trylock(&(*mutex)->m_mtx, curthread->thr_id)) == 0) {
                (*mutex)->m_owner = curthread;
                TAILQ_INSERT_TAIL(&curthread->mutexq, *mutex, m_qe);

                return (0);
        }
        /* Anything other than EBUSY means the lock was invalid. */
        if (error != EBUSY)
                abort();

        if ((*mutex)->m_owner == curthread) {
                if ((*mutex)->m_attr.m_type == PTHREAD_MUTEX_RECURSIVE) {
                        (*mutex)->m_count++;
                        return (0);
                } else
                        return (EDEADLK);
        }

        return (error);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
        int ret;

        if (mutex == NULL)
                return (EINVAL);

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        if ((*mutex == NULL) && ((ret = init_static(mutex)) != 0))
                return (ret);

        return (mutex_trylock_common(mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
        int ret;

        if (mutex == NULL)
                return (EINVAL);

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization marking the mutex private (delete safe):
         */
        if ((*mutex == NULL) && ((ret = init_static_private(mutex)) != 0))
                return (ret);

        return (mutex_trylock_common(mutex));
}

static int
mutex_lock_common(pthread_mutex_t * mutex)
{
        struct pthread *curthread = _get_curthread();
        int giant_count;
        int error;

        PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
            "Uninitialized mutex in mutex_lock_common");

        /*
         * Attempt to obtain the lock without blocking.
         */
        if ((error = umtx_trylock(&(*mutex)->m_mtx, curthread->thr_id)) == 0) {
                (*mutex)->m_owner = curthread;
                TAILQ_INSERT_TAIL(&curthread->mutexq, *mutex, m_qe);

                return (0);
        }
        /* Anything other than EBUSY means the lock was invalid. */
        if (error != EBUSY)
                abort();

        if ((*mutex)->m_owner == curthread) {
                if ((*mutex)->m_attr.m_type == PTHREAD_MUTEX_RECURSIVE) {
                        (*mutex)->m_count++;

                        return (0);
                } else
                        return (EDEADLK);
        }

        /*
         * Lock Giant so we can save the recursion count and set our
         * state.  Then we'll call into the kernel to block on this mutex.
         */
        GIANT_LOCK(curthread);
        PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT);
        if (_giant_count != 1)
                abort();
        giant_count = _giant_count;

        /*
         * This will unwind all references.
         */
        _giant_count = 1;
        GIANT_UNLOCK(curthread);

        if ((error = umtx_lock(&(*mutex)->m_mtx, curthread->thr_id)) == 0) {
                (*mutex)->m_owner = curthread;
                TAILQ_INSERT_TAIL(&curthread->mutexq, *mutex, m_qe);
        } else
                _thread_printf(0, "umtx_lock(%d)\n", error);

        /*
         * Set our state and restore our recursion count.
         */
        GIANT_LOCK(curthread);
        PTHREAD_SET_STATE(curthread, PS_RUNNING);

        _giant_count = giant_count;
        GIANT_UNLOCK(curthread);

        return (error);
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
        int ret;

        if (_thread_initial == NULL)
                _thread_init();

        if (mutex == NULL)
                return (EINVAL);

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        if ((*mutex == NULL) && ((ret = init_static(mutex)) != 0))
                return (ret);

        return (mutex_lock_common(mutex));
}

int
_pthread_mutex_lock(pthread_mutex_t *mutex)
{
        int ret = 0;

        if (_thread_initial == NULL)
                _thread_init();

        if (mutex == NULL)
                return (EINVAL);

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization marking it private (delete safe):
         */
        if ((*mutex == NULL) && ((ret = init_static_private(mutex)) != 0))
                return (ret);

        return (mutex_lock_common(mutex));
}

int
_mutex_cv_unlock(pthread_mutex_t * mutex)
{
        int ret;

        if ((ret = pthread_mutex_unlock(mutex)) == 0)
                (*mutex)->m_refcount++;

        return (ret);
}

int
_mutex_cv_lock(pthread_mutex_t * mutex)
{
        int ret;

        if ((ret = pthread_mutex_lock(mutex)) == 0)
                (*mutex)->m_refcount--;

        return (ret);
}

int
_pthread_mutex_unlock(pthread_mutex_t * mutex)
{
        struct pthread *curthread = _get_curthread();
        thr_id_t sav;
        int ret = 0;

        if (mutex == NULL || *mutex == NULL)
                return (EINVAL);

        if ((*mutex)->m_owner != curthread)
                return (EPERM);

        if ((*mutex)->m_count != 0) {
                (*mutex)->m_count--;
                return (0);
        }

        TAILQ_REMOVE(&curthread->mutexq, *mutex, m_qe);
        (*mutex)->m_owner = NULL;

        sav = (*mutex)->m_mtx.u_owner;
        ret = umtx_unlock(&(*mutex)->m_mtx, curthread->thr_id);
        if (ret) {
                _thread_printf(0, "umtx_unlock(%d)", ret);
                _thread_printf(0, "%x : %x : %x\n", curthread,
                    (*mutex)->m_mtx.u_owner, sav);
        }

        return (ret);
}

void
_mutex_unlock_private(pthread_t pthread)
{
        struct pthread_mutex *m, *m_next;

        for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
                m_next = TAILQ_NEXT(m, m_qe);
                if ((m->m_attr.m_flags & MUTEX_FLAGS_PRIVATE) != 0)
                        pthread_mutex_unlock(&m);
        }
}
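
/*
 * Illustrative note (a sketch, not part of this file's interface): since
 * pthread_mutex_t is a pointer to struct pthread_mutex, a statically
 * initialized mutex starts out as a NULL pointer, and the first call to
 * pthread_mutex_lock() or pthread_mutex_trylock() allocates and initializes
 * it through init_static() above (or init_static_private() for the
 * libc-internal entry points).  Assuming PTHREAD_MUTEX_INITIALIZER expands
 * to NULL, application code such as the following exercises that path, with
 * the first pthread_mutex_lock() call performing the allocation:
 *
 *      static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *      void
 *      example(void)
 *      {
 *              pthread_mutex_lock(&lock);
 *              pthread_mutex_unlock(&lock);
 *      }
 */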