/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "thr_private.h"
#include "thr_umtx.h"

#ifndef HAS__UMTX_OP_ERR
int
_umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
{

	if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
		return (errno);
	return (0);
}
#endif

void
_thr_umutex_init(struct umutex *mtx)
{
	static const struct umutex default_mtx = DEFAULT_UMUTEX;

	*mtx = default_mtx;
}

void
_thr_urwlock_init(struct urwlock *rwl)
{
	static const struct urwlock default_rwl = DEFAULT_URWLOCK;

	*rwl = default_rwl;
}

int
__thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
		return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));

	/*
	 * Fast path: an unowned (possibly contested) lock is taken with
	 * a single CAS.  UMUTEX_RB_OWNERDEAD and UMUTEX_RB_NOTRECOV are
	 * the owner values the kernel leaves in a robust mutex whose
	 * owner exited while holding it.
	 */
	for (;;) {
		owner = mtx->m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0 &&
		    atomic_cmpset_acq_32(&mtx->m_owner, owner, id | owner))
			return (0);
		if (owner == UMUTEX_RB_OWNERDEAD &&
		    atomic_cmpset_acq_32(&mtx->m_owner, owner,
		    id | UMUTEX_CONTESTED))
			return (EOWNERDEAD);
		if (owner == UMUTEX_RB_NOTRECOV)
			return (ENOTRECOVERABLE);

		/* wait in kernel */
		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
	}
}

#define SPINLOOPS 1000

int
__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;
	int count;

	if (!_thr_is_smp)
		return (__thr_umutex_lock(mtx, id));
	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
		return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));

	/*
	 * On SMP, spin in userland for a while before sleeping: the
	 * owner may release the lock while we poll, saving a round trip
	 * through the kernel.
	 */
	for (;;) {
		count = SPINLOOPS;
		while (count--) {
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | owner))
				return (0);
			if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | UMUTEX_CONTESTED))
				return (EOWNERDEAD);
			if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
				return (ENOTRECOVERABLE);
			CPU_SPINWAIT;
		}

		/* wait in kernel */
		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
	}
}
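/*
 * Timeout-passing convention for the _umtx_op() calls below, as this
 * file uses it (the kernel side is documented in _umtx_op(2)): the
 * size of the timeout structure travels in the "uaddr" argument, cast
 * to void *, and the pointer itself in "uaddr2".  The size lets the
 * kernel tell a plain struct timespec apart from the extended struct
 * _umtx_time, which also carries a clock id and the UMTX_ABSTIME flag.
 * A NULL pointer with size 0 requests an indefinite sleep.  An
 * illustrative call, assuming an absolute CLOCK_REALTIME deadline is
 * already stored in a hypothetical "ts":
 *
 *	struct _umtx_time t;
 *
 *	t._timeout = ts;
 *	t._flags = UMTX_ABSTIME;
 *	t._clockid = CLOCK_REALTIME;
 *	_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, (void *)sizeof(t), &t);
 */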
int
__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
    const struct timespec *abstime)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;
	uint32_t owner;
	int ret;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = CLOCK_REALTIME;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	for (;;) {
		if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
		    UMUTEX_PRIO_INHERIT)) == 0) {
			/* try to lock it */
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | owner))
				return (0);
			if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | UMUTEX_CONTESTED))
				return (EOWNERDEAD);
			if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
				return (ENOTRECOVERABLE);
			/* wait in kernel */
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0,
			    (void *)tm_size, __DECONST(void *, tm_p));
		} else {
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0,
			    (void *)tm_size, __DECONST(void *, tm_p));
			if (ret == 0 || ret == EOWNERDEAD ||
			    ret == ENOTRECOVERABLE)
				break;
		}
		if (ret == ETIMEDOUT)
			break;
	}
	return (ret);
}

int
__thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{

	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0));
}

int
__thr_umutex_trylock(struct umutex *mtx)
{

	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0));
}

int
__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
    uint32_t *oldceiling)
{

	return (_umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0));
}

int
_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
{

	/*
	 * A relative timeout that is already zero or negative cannot be
	 * slept on; report ETIMEDOUT without entering the kernel.
	 */
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return (_umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
	    __DECONST(void *, timeout)));
}

int
_thr_umtx_wait_uint(volatile u_int *mtx, u_int id,
    const struct timespec *timeout, int shared)
{

	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
	    UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
	    __DECONST(void *, timeout)));
}

int
_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
    const struct timespec *abstime, int shared)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = clockid;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
	    UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id,
	    (void *)tm_size, __DECONST(void *, tm_p)));
}

int
_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
{

	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
	    UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, nr_wakeup, 0, 0));
}
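/*
 * Condition variable wrappers.  UMTX_OP_CV_WAIT atomically releases
 * the umutex "m" and puts the caller to sleep; the mutex is not held
 * again on return, so callers reacquire it themselves.  The
 * c_has_waiters tests below skip the system call entirely when no
 * thread can be sleeping on the ucond.  A hedged usage sketch, not
 * code from this library ("ready", "cv", and "m" are hypothetical):
 *
 *	while (!ready) {
 *		_thr_ucond_wait(&cv, &m, NULL, 0);
 *		__thr_umutex_lock(&m, TID(_get_curthread()));
 *	}
 */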
void
_thr_ucond_init(struct ucond *cv)
{

	bzero(cv, sizeof(struct ucond));
}

int
_thr_ucond_wait(struct ucond *cv, struct umutex *m,
    const struct timespec *timeout, int flags)
{
	struct pthread *curthread;

	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0))) {
		curthread = _get_curthread();
		_thr_umutex_unlock(m, TID(curthread));
		return (ETIMEDOUT);
	}
	return (_umtx_op_err(cv, UMTX_OP_CV_WAIT, flags, m,
	    __DECONST(void *, timeout)));
}

int
_thr_ucond_signal(struct ucond *cv)
{

	if (!cv->c_has_waiters)
		return (0);
	return (_umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL));
}

int
_thr_ucond_broadcast(struct ucond *cv)
{

	if (!cv->c_has_waiters)
		return (0);
	return (_umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL));
}

int
__thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
    const struct timespec *tsp)
{
	struct _umtx_time timeout, *tm_p;
	size_t tm_size;

	if (tsp == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._timeout = *tsp;
		timeout._flags = UMTX_ABSTIME;
		timeout._clockid = CLOCK_REALTIME;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}
	return (_umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags,
	    (void *)tm_size, tm_p));
}

int
__thr_rwlock_wrlock(struct urwlock *rwlock, const struct timespec *tsp)
{
	struct _umtx_time timeout, *tm_p;
	size_t tm_size;

	if (tsp == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._timeout = *tsp;
		timeout._flags = UMTX_ABSTIME;
		timeout._clockid = CLOCK_REALTIME;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}
	return (_umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, (void *)tm_size,
	    tm_p));
}

int
__thr_rwlock_unlock(struct urwlock *rwlock)
{

	return (_umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL));
}

void
_thr_rwl_rdlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
			return;
		ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("rdlock error");
	}
}

void
_thr_rwl_wrlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_trywrlock(rwlock) == 0)
			return;
		ret = __thr_rwlock_wrlock(rwlock, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("wrlock error");
	}
}

void
_thr_rwl_unlock(struct urwlock *rwlock)
{

	if (_thr_rwlock_unlock(rwlock))
		PANIC("unlock error");
}
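/*
 * The _thr_rwl_* helpers above guard libthr-internal state (for
 * example, the thread list), where a lock failure cannot be reported
 * to anyone: anything other than success or EINTR is fatal via
 * PANIC().  A hedged usage sketch for a hypothetical internal table
 * protected by "table_lock":
 *
 *	static struct urwlock table_lock = DEFAULT_URWLOCK;
 *
 *	_thr_rwl_rdlock(&table_lock);
 *	(read the table; concurrent readers are allowed)
 *	_thr_rwl_unlock(&table_lock);
 */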