/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "thr_private.h"
#include "thr_umtx.h"

#ifndef HAS__UMTX_OP_ERR
int
_umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
{

	if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
		return (errno);
	return (0);
}
#endif

void
_thr_umutex_init(struct umutex *mtx)
{
	static const struct umutex default_mtx = DEFAULT_UMUTEX;

	*mtx = default_mtx;
}

void
_thr_urwlock_init(struct urwlock *rwl)
{
	static const struct urwlock default_rwl = DEFAULT_URWLOCK;

	*rwl = default_rwl;
}

int
__thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
		return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));

	for (;;) {
		owner = mtx->m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0 &&
		    atomic_cmpset_acq_32(&mtx->m_owner, owner, id | owner))
			return (0);
		if (owner == UMUTEX_RB_OWNERDEAD &&
		    atomic_cmpset_acq_32(&mtx->m_owner, owner,
		    id | UMUTEX_CONTESTED))
			return (EOWNERDEAD);
		if (owner == UMUTEX_RB_NOTRECOV)
			return (ENOTRECOVERABLE);

		/* wait in kernel */
		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
	}
}

#define SPINLOOPS 1000

int
__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;
	int count;

	if (!_thr_is_smp)
		return (__thr_umutex_lock(mtx, id));
	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
		return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));

	for (;;) {
		count = SPINLOOPS;
		while (count--) {
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | owner))
				return (0);
			if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | UMUTEX_CONTESTED))
				return (EOWNERDEAD);
			if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
				return (ENOTRECOVERABLE);
			CPU_SPINWAIT;
		}

		/* wait in kernel */
		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
	}
}
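
/*
 * The lock paths above acquire an uncontested umutex entirely in
 * userland with one atomic compare-and-set on m_owner; the kernel is
 * entered via _umtx_op(2) only for contention, for priority-protected
 * or priority-inheritance mutexes, and for robust-mutex recovery
 * (UMUTEX_RB_OWNERDEAD / UMUTEX_RB_NOTRECOV).  The spin variant
 * busy-waits for up to SPINLOOPS iterations on SMP machines before
 * sleeping, on the theory that the owner is likely running on another
 * CPU and about to release the lock.
 */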

int
__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
    const struct timespec *abstime)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;
	uint32_t owner;
	int ret;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = CLOCK_REALTIME;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	for (;;) {
		if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
		    UMUTEX_PRIO_INHERIT)) == 0) {
			/* try to lock it */
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | owner))
				return (0);
			if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | UMUTEX_CONTESTED))
				return (EOWNERDEAD);
			if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
				return (ENOTRECOVERABLE);
			/* wait in kernel */
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0,
			    (void *)tm_size, __DECONST(void *, tm_p));
		} else {
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0,
			    (void *)tm_size, __DECONST(void *, tm_p));
			if (ret == 0 || ret == EOWNERDEAD ||
			    ret == ENOTRECOVERABLE)
				break;
		}
		if (ret == ETIMEDOUT)
			break;
	}
	return (ret);
}

int
__thr_umutex_unlock(struct umutex *mtx)
{

	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0));
}

int
__thr_umutex_trylock(struct umutex *mtx)
{

	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0));
}

int
__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
    uint32_t *oldceiling)
{

	return (_umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0));
}

int
_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
{

	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return (_umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
	    __DECONST(void *, timeout)));
}

int
_thr_umtx_wait_uint(volatile u_int *mtx, u_int id,
    const struct timespec *timeout, int shared)
{

	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
	    UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
	    __DECONST(void *, timeout)));
}

int
_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
    const struct timespec *abstime, int shared)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = clockid;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
	    UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id,
	    (void *)tm_size, __DECONST(void *, tm_p)));
}

int
_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
{

	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
	    UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, nr_wakeup, 0, 0));
}
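
/*
 * The wait/wake helpers above implement a futex-style protocol:
 * UMTX_OP_WAIT* puts the caller to sleep only while the word still
 * holds the expected value, so a waiter that rechecks its condition in
 * a loop cannot miss a wakeup.  The "shared" argument selects between
 * process-shared and process-private sleep queues, and the helpers
 * taking a relative timeout return ETIMEDOUT up front when the timeout
 * has already expired, avoiding the system call entirely.
 */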

void
_thr_ucond_init(struct ucond *cv)
{

	bzero(cv, sizeof(struct ucond));
}

int
_thr_ucond_wait(struct ucond *cv, struct umutex *m,
    const struct timespec *timeout, int flags)
{
	struct pthread *curthread;

	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0))) {
		curthread = _get_curthread();
		_thr_umutex_unlock(m, TID(curthread));
		return (ETIMEDOUT);
	}
	return (_umtx_op_err(cv, UMTX_OP_CV_WAIT, flags, m,
	    __DECONST(void *, timeout)));
}

int
_thr_ucond_signal(struct ucond *cv)
{

	if (!cv->c_has_waiters)
		return (0);
	return (_umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL));
}

int
_thr_ucond_broadcast(struct ucond *cv)
{

	if (!cv->c_has_waiters)
		return (0);
	return (_umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL));
}

int
__thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
    const struct timespec *tsp)
{
	struct _umtx_time timeout, *tm_p;
	size_t tm_size;

	if (tsp == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._timeout = *tsp;
		timeout._flags = UMTX_ABSTIME;
		timeout._clockid = CLOCK_REALTIME;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}
	return (_umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags,
	    (void *)tm_size, tm_p));
}

int
__thr_rwlock_wrlock(struct urwlock *rwlock, const struct timespec *tsp)
{
	struct _umtx_time timeout, *tm_p;
	size_t tm_size;

	if (tsp == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._timeout = *tsp;
		timeout._flags = UMTX_ABSTIME;
		timeout._clockid = CLOCK_REALTIME;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}
	return (_umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, (void *)tm_size,
	    tm_p));
}

int
__thr_rwlock_unlock(struct urwlock *rwlock)
{

	return (_umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL));
}

void
_thr_rwl_rdlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
			return;
		ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("rdlock error");
	}
}

void
_thr_rwl_wrlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_trywrlock(rwlock) == 0)
			return;
		ret = __thr_rwlock_wrlock(rwlock, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("wrlock error");
	}
}

void
_thr_rwl_unlock(struct urwlock *rwlock)
{

	if (_thr_rwlock_unlock(rwlock))
		PANIC("unlock error");
}
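
/*
 * Illustrative sketch (not part of libthr): how the wait/wake
 * primitives above are typically composed into a one-shot event.  The
 * helper names "gate_wait" and "gate_post" are hypothetical and the
 * block is compiled out; it assumes INT_MAX from <limits.h> and
 * atomic_store_rel_int() from <machine/atomic.h>, the same atomic(9)
 * family already used by the lock paths in this file.
 */
#if 0
static volatile u_int gate;

static void
gate_wait(void)
{

	/*
	 * Sleep only while the gate still reads 0.  The kernel compares
	 * the word against the expected value under its own lock, so a
	 * post that lands between the load and the sleep simply makes
	 * the wait return immediately; the loop then rechecks.
	 */
	while (gate == 0)
		_thr_umtx_wait_uint(&gate, 0, NULL, 0 /* private */);
}

static void
gate_post(void)
{

	atomic_store_rel_int(&gate, 1);
	_thr_umtx_wake(&gate, INT_MAX, 0 /* private */);
}
#endif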