/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include "thr_private.h"
#include "thr_umtx.h"

#ifndef HAS__UMTX_OP_ERR
int
_umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
{
	if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
		return (errno);
	return (0);
}
#endif

void
_thr_umutex_init(struct umutex *mtx)
{
	static struct umutex default_mtx = DEFAULT_UMUTEX;

	*mtx = default_mtx;
}

void
_thr_urwlock_init(struct urwlock *rwl)
{
	static struct urwlock default_rwl = DEFAULT_URWLOCK;

	*rwl = default_rwl;
}

int
__thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		for (;;) {
			/* wait in kernel */
			_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);

			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
				return (0);
		}
	}

	return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
}

#define SPINLOOPS 1000

int
__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if (!_thr_is_smp)
		return __thr_umutex_lock(mtx, id);

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		for (;;) {
			int count = SPINLOOPS;

			while (count--) {
				owner = mtx->m_owner;
				if ((owner & ~UMUTEX_CONTESTED) == 0) {
					if (atomic_cmpset_acq_32(
					    &mtx->m_owner,
					    owner, id|owner)) {
						return (0);
					}
				}
				CPU_SPINWAIT;
			}

			/* wait in kernel */
			_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
		}
	}

	return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
}
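
/*
 * Lock a umutex, giving up once the absolute CLOCK_REALTIME deadline
 * "ets" has passed.  The kernel interface takes a relative timeout,
 * so the remaining time is computed up front and recomputed after
 * every wakeup until the lock is acquired or the deadline expires.
 */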
int
__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *ets)
{
	struct timespec timo, cts;
	uint32_t owner;
	int ret;

	clock_gettime(CLOCK_REALTIME, &cts);
	TIMESPEC_SUB(&timo, ets, &cts);

	if (timo.tv_sec < 0)
		return (ETIMEDOUT);

	for (;;) {
		if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {

			/* wait in kernel */
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, &timo);

			/* now try to lock it */
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
				return (0);
		} else {
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, &timo);
			if (ret == 0)
				break;
		}
		if (ret == ETIMEDOUT)
			break;
		clock_gettime(CLOCK_REALTIME, &cts);
		TIMESPEC_SUB(&timo, ets, &cts);
		if (timo.tv_sec < 0 || (timo.tv_sec == 0 && timo.tv_nsec == 0)) {
			ret = ETIMEDOUT;
			break;
		}
	}
	return (ret);
}

int
__thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{
#ifndef __ia64__
	/* XXX this logic has a race-condition on ia64. */
	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		atomic_cmpset_rel_32(&mtx->m_owner, id | UMUTEX_CONTESTED,
		    UMUTEX_CONTESTED);
		return _umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE, 0, 0, 0);
	}
#endif /* __ia64__ */
	return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0);
}

int
__thr_umutex_trylock(struct umutex *mtx)
{
	return _umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0);
}

int
__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
	uint32_t *oldceiling)
{
	return _umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0);
}

int
_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return _umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
	    __DECONST(void *, timeout));
}

int
_thr_umtx_wait_uint(volatile u_int *mtx, u_int id,
	const struct timespec *timeout, int shared)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return _umtx_op_err(__DEVOLATILE(void *, mtx),
	    shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
	    __DECONST(void *, timeout));
}

int
_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
	const struct timespec *abstime, int shared)
{
	struct timespec ts, ts2, *tsp;

	if (abstime != NULL) {
		clock_gettime(clockid, &ts);
		TIMESPEC_SUB(&ts2, abstime, &ts);
		if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
			return (ETIMEDOUT);
		tsp = &ts2;
	} else {
		tsp = NULL;
	}
	return _umtx_op_err(__DEVOLATILE(void *, mtx),
	    shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, NULL,
	    tsp);
}
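
/*
 * Wake up to "nr_wakeup" threads sleeping on the given address.
 * "shared" selects the process-shared wait queue (UMTX_OP_WAKE)
 * rather than the process-private one (UMTX_OP_WAKE_PRIVATE).
 */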
int
_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
{
	return _umtx_op_err(__DEVOLATILE(void *, mtx),
	    shared ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE,
	    nr_wakeup, 0, 0);
}

void
_thr_ucond_init(struct ucond *cv)
{
	bzero(cv, sizeof(struct ucond));
}

int
_thr_ucond_wait(struct ucond *cv, struct umutex *m,
	const struct timespec *timeout, int flags)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0))) {
		struct pthread *curthread = _get_curthread();

		_thr_umutex_unlock(m, TID(curthread));
		return (ETIMEDOUT);
	}
	return _umtx_op_err(cv, UMTX_OP_CV_WAIT, flags,
	    m, __DECONST(void *, timeout));
}

int
_thr_ucond_signal(struct ucond *cv)
{
	if (!cv->c_has_waiters)
		return (0);
	return _umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL);
}

int
_thr_ucond_broadcast(struct ucond *cv)
{
	if (!cv->c_has_waiters)
		return (0);
	return _umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL);
}

int
__thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
{
	return _umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags, NULL, tsp);
}

int
__thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
{
	return _umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, NULL, tsp);
}

int
__thr_rwlock_unlock(struct urwlock *rwlock)
{
	return _umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL);
}

void
_thr_rwl_rdlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
			return;
		ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("rdlock error");
	}
}

void
_thr_rwl_wrlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_trywrlock(rwlock) == 0)
			return;
		ret = __thr_rwlock_wrlock(rwlock, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("wrlock error");
	}
}

void
_thr_rwl_unlock(struct urwlock *rwlock)
{
	if (_thr_rwlock_unlock(rwlock))
		PANIC("unlock error");
}
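
/*
 * Illustrative sketch only, not part of this file's interface: the
 * wait/wake wrappers above behave like a futex, so a minimal one-shot
 * event can be built on top of them.  The event_* names below are
 * hypothetical; the sketch assumes <limits.h> for INT_MAX and
 * <machine/atomic.h> for atomic_store_rel_int().
 *
 *	static volatile u_int event_state = 0;
 *
 *	void
 *	event_wait(void)
 *	{
 *		while (event_state == 0)
 *			(void)_thr_umtx_wait_uint(&event_state, 0,
 *			    NULL, 0);
 *	}
 *
 *	void
 *	event_set(void)
 *	{
 *		atomic_store_rel_int(&event_state, 1);
 *		(void)_thr_umtx_wake(&event_state, INT_MAX, 0);
 *	}
 */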