/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "thr_private.h"
#include "thr_umtx.h"

void
_thr_umutex_init(struct umutex *mtx)
{
	static const struct umutex default_mtx = DEFAULT_UMUTEX;

	*mtx = default_mtx;
}

void
_thr_urwlock_init(struct urwlock *rwl)
{
	static const struct urwlock default_rwl = DEFAULT_URWLOCK;

	*rwl = default_rwl;
}

int
__thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	/* Priority-protected/inheriting mutexes must be locked in kernel. */
	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
		return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));

	for (;;) {
		owner = mtx->m_owner;
		/* Fast path: the lock is free, try to claim it. */
		if ((owner & ~UMUTEX_CONTESTED) == 0 &&
		    atomic_cmpset_acq_32(&mtx->m_owner, owner, id | owner))
			return (0);
		/* A robust mutex whose owner died can be taken over. */
		if (owner == UMUTEX_RB_OWNERDEAD &&
		    atomic_cmpset_acq_32(&mtx->m_owner, owner,
		    id | UMUTEX_CONTESTED))
			return (EOWNERDEAD);
		if (owner == UMUTEX_RB_NOTRECOV)
			return (ENOTRECOVERABLE);

		/* wait in kernel */
		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
	}
}

#define	SPINLOOPS	1000

int
__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;
	int count;

	/* Spinning is pointless on a uniprocessor. */
	if (!_thr_is_smp)
		return (__thr_umutex_lock(mtx, id));
	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
		return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));

	for (;;) {
		count = SPINLOOPS;
		while (count--) {
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | owner))
				return (0);
			if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | UMUTEX_CONTESTED))
				return (EOWNERDEAD);
			if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
				return (ENOTRECOVERABLE);
			CPU_SPINWAIT;
		}

		/* wait in kernel */
		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
	}
}
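/*
 * The timed operations below encode their timeout as a struct
 * _umtx_time: the structure's size is passed through the "uaddr"
 * argument of _umtx_op_err() and the pointer through "uaddr2".  A
 * NULL pointer (size 0) means wait indefinitely.
 */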
int
__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
    const struct timespec *abstime)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;
	uint32_t owner;
	int ret;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = CLOCK_REALTIME;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	for (;;) {
		if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
		    UMUTEX_PRIO_INHERIT)) == 0) {
			/* try to lock it */
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | owner))
				return (0);
			if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | UMUTEX_CONTESTED))
				return (EOWNERDEAD);
			if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
				return (ENOTRECOVERABLE);
			/* wait in kernel */
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0,
			    (void *)tm_size, __DECONST(void *, tm_p));
		} else {
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0,
			    (void *)tm_size, __DECONST(void *, tm_p));
			if (ret == 0 || ret == EOWNERDEAD ||
			    ret == ENOTRECOVERABLE)
				break;
		}
		if (ret == ETIMEDOUT)
			break;
	}
	return (ret);
}

int
__thr_umutex_unlock(struct umutex *mtx)
{

	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0));
}

int
__thr_umutex_trylock(struct umutex *mtx)
{

	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0));
}

int
__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
    uint32_t *oldceiling)
{

	return (_umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0));
}

int
_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
{

	/* A timeout that already expired degenerates to a failed poll. */
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return (_umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
	    __DECONST(void *, timeout)));
}

int
_thr_umtx_wait_uint(volatile u_int *mtx, u_int id,
    const struct timespec *timeout, int shared)
{

	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
	    UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
	    __DECONST(void *, timeout)));
}

int
_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
    const struct timespec *abstime, int shared)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = clockid;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
	    UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id,
	    (void *)tm_size, __DECONST(void *, tm_p)));
}
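/*
 * Wake up to nr_wakeup threads sleeping on the word at mtx.
 * Process-shared objects need the cross-process UMTX_OP_WAKE;
 * otherwise the process-private variant suffices.
 */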
int
_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
{

	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
	    UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, nr_wakeup, 0, 0));
}

void
_thr_ucond_init(struct ucond *cv)
{

	bzero(cv, sizeof(struct ucond));
}

int
_thr_ucond_wait(struct ucond *cv, struct umutex *m,
    const struct timespec *timeout, int flags)
{
	struct pthread *curthread;

	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0))) {
		/* Already timed out, but still honor the mutex-release
		   contract of a condition wait. */
		curthread = _get_curthread();
		_thr_umutex_unlock(m, TID(curthread));
		return (ETIMEDOUT);
	}
	return (_umtx_op_err(cv, UMTX_OP_CV_WAIT, flags, m,
	    __DECONST(void *, timeout)));
}

int
_thr_ucond_signal(struct ucond *cv)
{

	/* Skip the syscall when nobody is sleeping on the condvar. */
	if (!cv->c_has_waiters)
		return (0);
	return (_umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL));
}

int
_thr_ucond_broadcast(struct ucond *cv)
{

	if (!cv->c_has_waiters)
		return (0);
	return (_umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL));
}

int
__thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
    const struct timespec *tsp)
{
	struct _umtx_time timeout, *tm_p;
	size_t tm_size;

	if (tsp == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._timeout = *tsp;
		timeout._flags = UMTX_ABSTIME;
		timeout._clockid = CLOCK_REALTIME;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}
	return (_umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags,
	    (void *)tm_size, tm_p));
}

int
__thr_rwlock_wrlock(struct urwlock *rwlock, const struct timespec *tsp)
{
	struct _umtx_time timeout, *tm_p;
	size_t tm_size;

	if (tsp == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._timeout = *tsp;
		timeout._flags = UMTX_ABSTIME;
		timeout._clockid = CLOCK_REALTIME;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}
	return (_umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, (void *)tm_size,
	    tm_p));
}

int
__thr_rwlock_unlock(struct urwlock *rwlock)
{

	return (_umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL));
}

void
_thr_rwl_rdlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
			return;
		ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("rdlock error");
	}
}

void
_thr_rwl_wrlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_trywrlock(rwlock) == 0)
			return;
		ret = __thr_rwlock_wrlock(rwlock, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("wrlock error");
	}
}

void
_thr_rwl_unlock(struct urwlock *rwlock)
{

	if (_thr_rwlock_unlock(rwlock))
		PANIC("unlock error");
}
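/*
 * Illustrative sketch (not part of libthr): the wait/wake pair above is
 * the usual building block for a one-shot event.  The event_* names are
 * hypothetical.
 *
 *	static volatile u_int event_state = 0;	/- 0 = not yet posted -/
 *
 *	static void
 *	event_wait(void)
 *	{
 *		/- Sleep only while the word still reads 0; a wakeup with
 *		   a stale value simply loops and re-checks. -/
 *		while (event_state == 0)
 *			_thr_umtx_wait_uint(&event_state, 0, NULL, 0);
 *	}
 *
 *	static void
 *	event_post(void)
 *	{
 *		atomic_store_rel_int(&event_state, 1);
 *		/- Wake every waiter; INT_MAX is from <limits.h>. -/
 *		_thr_umtx_wake(&event_state, INT_MAX, 0);
 *	}
 *
 * (Inner comments written as "/-" ... "-/" so they nest inside this block.)
 */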