/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include "thr_private.h"
#include "thr_umtx.h"

#ifndef HAS__UMTX_OP_ERR
/*
 * Compatibility wrapper: return the error number directly instead of
 * the usual -1/errno convention of _umtx_op().
 */
int
_umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
{
	if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
		return (errno);
	return (0);
}
#endif

void
_thr_umutex_init(struct umutex *mtx)
{
	static struct umutex default_mtx = DEFAULT_UMUTEX;

	*mtx = default_mtx;
}

void
_thr_urwlock_init(struct urwlock *rwl)
{
	static struct urwlock default_rwl = DEFAULT_URWLOCK;

	*rwl = default_rwl;
}

int
__thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		for (;;) {
			/* wait in kernel */
			_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);

			/* try to take ownership in userland */
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | owner))
				return (0);
		}
	}

	/* priority-aware mutexes are locked entirely by the kernel */
	return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
}

#define	SPINLOOPS	1000

int
__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if (!_thr_is_smp)
		return __thr_umutex_lock(mtx, id);

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		for (;;) {
			int count = SPINLOOPS;

			/* spin a bounded number of times before sleeping */
			while (count--) {
				owner = mtx->m_owner;
				if ((owner & ~UMUTEX_CONTESTED) == 0) {
					if (atomic_cmpset_acq_32(
					    &mtx->m_owner,
					    owner, id | owner)) {
						return (0);
					}
				}
				CPU_SPINWAIT;
			}

			/* wait in kernel */
			_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
		}
	}

	return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
}
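/*
 * Illustrative sketch (compiled out): roughly how the rest of libthr
 * is expected to use the lock functions above.  A cheap userland
 * compare-and-set is tried first; only on contention does the caller
 * fall into __thr_umutex_lock(), which parks the thread in the kernel.
 * "example_mutex_lock" is a hypothetical name, not part of libthr.
 */
#if 0
static int
example_mutex_lock(struct umutex *mtx, uint32_t id)
{
	/* fast path: grab an unowned lock word with a single CAS */
	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
		return (0);
	/* slow path: wait in the kernel until the lock can be taken */
	return __thr_umutex_lock(mtx, id);
}
#endif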
int
__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
    const struct timespec *ets)
{
	struct timespec timo, cts;
	uint32_t owner;
	int ret;

	/* convert the absolute deadline into a relative timeout */
	clock_gettime(CLOCK_REALTIME, &cts);
	TIMESPEC_SUB(&timo, ets, &cts);

	if (timo.tv_sec < 0)
		return (ETIMEDOUT);

	for (;;) {
		if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
		    UMUTEX_PRIO_INHERIT)) == 0) {
			/* wait in kernel */
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0,
			    &timo);

			/* now try to lock it */
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | owner))
				return (0);
		} else {
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0,
			    &timo);
			if (ret == 0)
				break;
		}
		if (ret == ETIMEDOUT)
			break;
		/* recompute the remaining time before sleeping again */
		clock_gettime(CLOCK_REALTIME, &cts);
		TIMESPEC_SUB(&timo, ets, &cts);
		if (timo.tv_sec < 0 ||
		    (timo.tv_sec == 0 && timo.tv_nsec == 0)) {
			ret = ETIMEDOUT;
			break;
		}
	}
	return (ret);
}

int
__thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{
#ifndef __ia64__
	/* XXX this logic has a race-condition on ia64. */
	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		/* hand the lock word back, then wake one waiter */
		atomic_cmpset_rel_32(&mtx->m_owner, id | UMUTEX_CONTESTED,
		    UMUTEX_CONTESTED);
		return _umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE, 0, 0, 0);
	}
#endif /* __ia64__ */
	return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0);
}

int
__thr_umutex_trylock(struct umutex *mtx)
{
	return _umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0);
}

int
__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
    uint32_t *oldceiling)
{
	return _umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0);
}

int
_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return _umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
	    __DECONST(void *, timeout));
}

int
_thr_umtx_wait_uint(volatile u_int *mtx, u_int id,
    const struct timespec *timeout, int shared)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return _umtx_op_err(__DEVOLATILE(void *, mtx),
	    shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
	    __DECONST(void *, timeout));
}

int
_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
    const struct timespec *abstime, int shared)
{
	/* the clock id and the absolute-time flag are packed into uaddr */
	return _umtx_op_err(__DEVOLATILE(void *, mtx),
	    shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id,
	    abstime != NULL ?
	    (void *)(uintptr_t)((clockid << 16) | UMTX_WAIT_ABSTIME) : 0,
	    __DECONST(void *, abstime));
}

int
_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
{
	return _umtx_op_err(__DEVOLATILE(void *, mtx),
	    shared ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, nr_wakeup, 0, 0);
}
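/*
 * Illustrative sketch (compiled out): a minimal one-shot event built
 * on _thr_umtx_wait_uint()/_thr_umtx_wake() above.  The "example_*"
 * names are hypothetical.  The waiter must re-check the word in a
 * loop: the kernel only sleeps while the word still equals the
 * expected value, and the wait may return early (e.g. on a signal).
 */
#if 0
static volatile u_int example_flag = 0;

static void
example_event_wait(void)
{
	while (example_flag == 0)
		_thr_umtx_wait_uint(&example_flag, 0, NULL, 0);
}

static void
example_event_set(void)
{
	example_flag = 1;
	_thr_umtx_wake(&example_flag, INT_MAX, 0);	/* wake all waiters */
}
#endif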
void
_thr_ucond_init(struct ucond *cv)
{
	bzero(cv, sizeof(struct ucond));
}

int
_thr_ucond_wait(struct ucond *cv, struct umutex *m,
    const struct timespec *timeout, int flags)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0))) {
		/*
		 * The timeout has already expired.  The kernel would
		 * normally release the mutex for us, so do it here
		 * before bailing out.
		 */
		struct pthread *curthread = _get_curthread();

		_thr_umutex_unlock(m, TID(curthread));
		return (ETIMEDOUT);
	}
	return _umtx_op_err(cv, UMTX_OP_CV_WAIT, flags, m,
	    __DECONST(void *, timeout));
}

int
_thr_ucond_signal(struct ucond *cv)
{
	if (!cv->c_has_waiters)
		return (0);
	return _umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL);
}

int
_thr_ucond_broadcast(struct ucond *cv)
{
	if (!cv->c_has_waiters)
		return (0);
	return _umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL);
}

int
__thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
{
	return _umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags, NULL, tsp);
}

int
__thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
{
	return _umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, NULL, tsp);
}

int
__thr_rwlock_unlock(struct urwlock *rwlock)
{
	return _umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL);
}

void
_thr_rwl_rdlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		/* the try-lock avoids entering the kernel when uncontested */
		if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
			return;
		ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("rdlock error");
	}
}

void
_thr_rwl_wrlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_trywrlock(rwlock) == 0)
			return;
		ret = __thr_rwlock_wrlock(rwlock, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("wrlock error");
	}
}

void
_thr_rwl_unlock(struct urwlock *rwlock)
{
	if (_thr_rwlock_unlock(rwlock))
		PANIC("unlock error");
}
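/*
 * Illustrative sketch (compiled out): the classic predicate loop for
 * _thr_ucond_wait() above.  The "example_*" names are hypothetical.
 * UMTX_OP_CV_WAIT queues the thread on the condvar and releases the
 * mutex atomically, but the mutex is NOT held again on wakeup, so the
 * waiter must retake it before re-checking the predicate.
 */
#if 0
static struct umutex example_lock = DEFAULT_UMUTEX;
static struct ucond example_cond;	/* zero-filled, as _thr_ucond_init() does */
static int example_ready;

static void
example_wait_for_ready(uint32_t id)
{
	__thr_umutex_lock(&example_lock, id);
	while (!example_ready) {
		_thr_ucond_wait(&example_cond, &example_lock, NULL, 0);
		__thr_umutex_lock(&example_lock, id);	/* retake the mutex */
	}
	__thr_umutex_unlock(&example_lock, id);
}

static void
example_mark_ready(uint32_t id)
{
	__thr_umutex_lock(&example_lock, id);
	example_ready = 1;
	__thr_umutex_unlock(&example_lock, id);
	_thr_ucond_signal(&example_cond);
}
#endif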