/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _THR_FBSD_UMTX_H_
#define _THR_FBSD_UMTX_H_

#include <strings.h>
#include <sys/umtx.h>

/* Static initializers; struct umutex has extra padding on 32-bit ABIs. */
#ifdef __LP64__
#define DEFAULT_UMUTEX	{0,0,{0,0},0,{0,0}}
#else
#define DEFAULT_UMUTEX	{0,0,{0,0},0,0,{0,0}}
#endif
#define DEFAULT_URWLOCK {0,0,0,0,{0,0,0,0}}

int _umtx_op_err(void *, int op, u_long, void *, void *) __hidden;
int __thr_umutex_lock(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
    const struct timespec *timeout) __hidden;
int __thr_umutex_unlock(struct umutex *mtx) __hidden;
int __thr_umutex_trylock(struct umutex *mtx) __hidden;
int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
    uint32_t *oldceiling) __hidden;

void _thr_umutex_init(struct umutex *mtx) __hidden;
void _thr_urwlock_init(struct urwlock *rwl) __hidden;

int _thr_umtx_wait(volatile long *mtx, long exp,
    const struct timespec *timeout) __hidden;
int _thr_umtx_wait_uint(volatile u_int *mtx, u_int exp,
    const struct timespec *timeout, int shared) __hidden;
int _thr_umtx_timedwait_uint(volatile u_int *mtx, u_int exp, int clockid,
    const struct timespec *timeout, int shared) __hidden;
int _thr_umtx_wake(volatile void *mtx, int count, int shared) __hidden;
int _thr_ucond_wait(struct ucond *cv, struct umutex *m,
    const struct timespec *timeout, int flags) __hidden;
void _thr_ucond_init(struct ucond *cv) __hidden;
int _thr_ucond_signal(struct ucond *cv) __hidden;
int _thr_ucond_broadcast(struct ucond *cv) __hidden;

int __thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
    const struct timespec *tsp) __hidden;
int __thr_rwlock_wrlock(struct urwlock *rwlock,
    const struct timespec *tsp) __hidden;
int __thr_rwlock_unlock(struct urwlock *rwlock) __hidden;

/* For internal use only. */
void _thr_rwl_rdlock(struct urwlock *rwlock) __hidden;
void _thr_rwl_wrlock(struct urwlock *rwlock) __hidden;
void _thr_rwl_unlock(struct urwlock *rwlock) __hidden;
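
/*
 * The inline wrappers below implement the uncontested fast paths with
 * userspace atomics and call into the out-of-line __thr_* routines,
 * which use the _umtx_op(2) system call, only when contention forces a
 * sleep or a wakeup.  As a hedged illustration (not part of this
 * header's API), the wait/wake primitives declared above are normally
 * driven from a loop that re-checks the watched word, since wakeups
 * may be spurious; `gate' is a hypothetical process-private flag:
 *
 *	static volatile u_int gate = 0;
 *
 *	while (gate == 0)                          (waiting thread)
 *		(void)_thr_umtx_wait_uint(&gate, 0, NULL, 0);
 *
 *	gate = 1;                                  (waking thread)
 *	(void)_thr_umtx_wake(&gate, INT_MAX, 0);
 */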

static inline int
_thr_umutex_trylock(struct umutex *mtx, uint32_t id)
{

	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
		return (0);
	/* Robust mutex whose previous owner died: take it over. */
	if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_OWNERDEAD) &&
	    atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_RB_OWNERDEAD,
	    id | UMUTEX_CONTESTED))
		return (EOWNERDEAD);
	if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_NOTRECOV))
		return (ENOTRECOVERABLE);
	if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EBUSY);
	/* Priority-protected mutexes need the kernel to manage ceilings. */
	return (__thr_umutex_trylock(mtx));
}

static inline int
_thr_umutex_trylock2(struct umutex *mtx, uint32_t id)
{

	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0)
		return (0);
	/*
	 * A plain mutex left in the contested state can still be taken
	 * in userspace, provided the contested bit is preserved.
	 */
	if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED &&
	    __predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
	    UMUTEX_PRIO_INHERIT)) == 0) &&
	    atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED,
	    id | UMUTEX_CONTESTED))
		return (0);
	if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_OWNERDEAD) &&
	    atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_RB_OWNERDEAD,
	    id | UMUTEX_CONTESTED))
		return (EOWNERDEAD);
	if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_NOTRECOV))
		return (ENOTRECOVERABLE);
	return (EBUSY);
}

static inline int
_thr_umutex_lock(struct umutex *mtx, uint32_t id)
{

	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_lock(mtx, id));
}

static inline int
_thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{

	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_lock_spin(mtx, id));
}

static inline int
_thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
    const struct timespec *timeout)
{

	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_timedlock(mtx, id, timeout));
}

static inline int
_thr_umutex_unlock2(struct umutex *mtx, uint32_t id, int *defer)
{
	uint32_t flags, owner;
	bool noncst;

	flags = mtx->m_flags;
	noncst = (flags & UMUTEX_NONCONSISTENT) != 0;

	/* Priority-aware mutexes must be released by the kernel on contention. */
	if ((flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0) {
		if (atomic_cmpset_rel_32(&mtx->m_owner, id, noncst ?
		    UMUTEX_RB_NOTRECOV : UMUTEX_UNOWNED))
			return (0);
		return (__thr_umutex_unlock(mtx));
	}

	do {
		owner = mtx->m_owner;
		if (__predict_false((owner & ~UMUTEX_CONTESTED) != id))
			return (EPERM);
	} while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner, owner,
	    noncst ? UMUTEX_RB_NOTRECOV : UMUTEX_UNOWNED)));
	if ((owner & UMUTEX_CONTESTED) != 0) {
		/* Wake a waiter now, or let the caller batch the wakeup. */
		if (defer == NULL || noncst)
			(void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2,
			    flags, 0, 0);
		else
			*defer = 1;
	}
	return (0);
}

static inline int
_thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{

	return (_thr_umutex_unlock2(mtx, id, NULL));
}
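
/*
 * A hedged sketch of how the lock wrappers above are normally paired
 * with unlock; `m' and `id' are hypothetical stand-ins for a mutex
 * initialized with DEFAULT_UMUTEX and the caller's lock id:
 *
 *	static struct umutex m = DEFAULT_UMUTEX;
 *
 *	error = _thr_umutex_lock(&m, id);
 *	if (error == 0 || error == EOWNERDEAD) {
 *		(critical section; EOWNERDEAD means a robust mutex was
 *		recovered from a dead owner, so the protected state may
 *		be inconsistent)
 *		(void)_thr_umutex_unlock(&m, id);
 *	}
 */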

static inline int
_thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags)
{
	int32_t state, wrflags;

	/* Reader-preferring locks ignore waiting writers when acquiring. */
	if ((flags & URWLOCK_PREFER_READER) != 0 ||
	    (rwlock->rw_flags & URWLOCK_PREFER_READER) != 0)
		wrflags = URWLOCK_WRITE_OWNER;
	else
		wrflags = URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS;
	state = rwlock->rw_state;
	while (!(state & wrflags)) {
		if (__predict_false(URWLOCK_READER_COUNT(state) ==
		    URWLOCK_MAX_READERS))
			return (EAGAIN);
		if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state + 1))
			return (0);
		state = rwlock->rw_state;
	}

	return (EBUSY);
}

static inline int
_thr_rwlock_trywrlock(struct urwlock *rwlock)
{
	int32_t state;

	state = rwlock->rw_state;
	while ((state & URWLOCK_WRITE_OWNER) == 0 &&
	    URWLOCK_READER_COUNT(state) == 0) {
		if (atomic_cmpset_acq_32(&rwlock->rw_state, state,
		    state | URWLOCK_WRITE_OWNER))
			return (0);
		state = rwlock->rw_state;
	}

	return (EBUSY);
}

static inline int
_thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
{

	if (_thr_rwlock_tryrdlock(rwlock, flags) == 0)
		return (0);
	return (__thr_rwlock_rdlock(rwlock, flags, tsp));
}

static inline int
_thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
{

	if (_thr_rwlock_trywrlock(rwlock) == 0)
		return (0);
	return (__thr_rwlock_wrlock(rwlock, tsp));
}

static inline int
_thr_rwlock_unlock(struct urwlock *rwlock)
{
	int32_t state;

	state = rwlock->rw_state;
	if ((state & URWLOCK_WRITE_OWNER) != 0) {
		if (atomic_cmpset_rel_32(&rwlock->rw_state,
		    URWLOCK_WRITE_OWNER, 0))
			return (0);
	} else {
		for (;;) {
			if (__predict_false(URWLOCK_READER_COUNT(state) == 0))
				return (EPERM);
			/*
			 * The last reader must enter the kernel when there
			 * are waiters to wake; otherwise just drop the
			 * reader count in userspace.
			 */
			if (!((state & (URWLOCK_WRITE_WAITERS |
			    URWLOCK_READ_WAITERS)) != 0 &&
			    URWLOCK_READER_COUNT(state) == 1)) {
				if (atomic_cmpset_rel_32(&rwlock->rw_state,
				    state, state - 1))
					return (0);
				state = rwlock->rw_state;
			} else {
				break;
			}
		}
	}
	return (__thr_rwlock_unlock(rwlock));
}
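
/*
 * A hedged read-side usage sketch; `rwl' is a hypothetical
 * process-private lock initialized with DEFAULT_URWLOCK:
 *
 *	static struct urwlock rwl = DEFAULT_URWLOCK;
 *
 *	if (_thr_rwlock_rdlock(&rwl, 0, NULL) == 0) {
 *		(read-side critical section)
 *		(void)_thr_rwlock_unlock(&rwl);
 *	}
 */

#endif /* !_THR_FBSD_UMTX_H_ */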