/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _THR_FBSD_UMTX_H_
#define _THR_FBSD_UMTX_H_

#include <errno.h>	/* EBUSY/EPERM/EAGAIN, returned by the inline paths below */
#include <strings.h>
#include <sys/umtx.h>

#define DEFAULT_UMUTEX	{0,0,{0,0},{0,0,0,0}}
#define DEFAULT_URWLOCK {0,0,0,0,{0,0,0,0}}

int _umtx_op_err(void *, int op, u_long, void *, void *) __hidden;
int __thr_umutex_lock(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *timeout) __hidden;
int __thr_umutex_unlock(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_trylock(struct umutex *mtx) __hidden;
int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
	uint32_t *oldceiling) __hidden;

void _thr_umutex_init(struct umutex *mtx) __hidden;
void _thr_urwlock_init(struct urwlock *rwl) __hidden;

int _thr_umtx_wait(volatile long *mtx, long exp,
	const struct timespec *timeout) __hidden;
int _thr_umtx_wait_uint(volatile u_int *mtx, u_int exp,
	const struct timespec *timeout, int shared) __hidden;
int _thr_umtx_timedwait_uint(volatile u_int *mtx, u_int exp, int clockid,
	const struct timespec *timeout, int shared) __hidden;
int _thr_umtx_wake(volatile void *mtx, int count, int shared) __hidden;
int _thr_ucond_wait(struct ucond *cv, struct umutex *m,
	const struct timespec *timeout, int check_unparking) __hidden;
void _thr_ucond_init(struct ucond *cv) __hidden;
int _thr_ucond_signal(struct ucond *cv) __hidden;
int _thr_ucond_broadcast(struct ucond *cv) __hidden;

int __thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
	const struct timespec *tsp) __hidden;
int __thr_rwlock_wrlock(struct urwlock *rwlock,
	const struct timespec *tsp) __hidden;
int __thr_rwlock_unlock(struct urwlock *rwlock) __hidden;

/* Internal use only. */
void _thr_rwl_rdlock(struct urwlock *rwlock) __hidden;
void _thr_rwl_wrlock(struct urwlock *rwlock) __hidden;
void _thr_rwl_unlock(struct urwlock *rwlock) __hidden;
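/*
 * Inline fast paths.  Each wrapper below first attempts the operation
 * with a single userland compare-and-set on the lock word and only
 * falls back to the corresponding __thr_* slow path on contention, or
 * when the mutex uses priority protection/inheritance and the kernel
 * must track the owner.  The slow paths may sleep in the kernel.
 */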
static inline int
_thr_umutex_trylock(struct umutex *mtx, uint32_t id)
{
	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
		return (0);
	if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EBUSY);
	return (__thr_umutex_trylock(mtx));
}

/*
 * Like _thr_umutex_trylock(), but for plain mutexes it may also take
 * over a lock word left in the UMUTEX_CONTESTED state, keeping the
 * contested bit set so the eventual unlock still wakes waiters.
 */
static inline int
_thr_umutex_trylock2(struct umutex *mtx, uint32_t id)
{
	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0)
		return (0);
	if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED &&
	    __predict_true((mtx->m_flags &
	    (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0))
		if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED,
		    id | UMUTEX_CONTESTED))
			return (0);
	return (EBUSY);
}

static inline int
_thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_lock(mtx, id));
}

static inline int
_thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_lock_spin(mtx, id));
}

static inline int
_thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *timeout)
{
	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_timedlock(mtx, id, timeout));
}

static inline int
_thr_umutex_unlock2(struct umutex *mtx, uint32_t id, int *defer)
{
	uint32_t flags = mtx->m_flags;

	if ((flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
		uint32_t owner;

		/* Release the lock word; fail if we are not the owner. */
		do {
			owner = mtx->m_owner;
			if (__predict_false((owner & ~UMUTEX_CONTESTED) !=
			    id))
				return (EPERM);
		} while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner,
		    owner, UMUTEX_UNOWNED)));
		/* There were waiters; wake one now unless deferred. */
		if (owner & UMUTEX_CONTESTED) {
			if (defer == NULL)
				(void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2,
				    flags, 0, 0);
			else
				*defer = 1;
		}
		return (0);
	}
	if (atomic_cmpset_rel_32(&mtx->m_owner, id, UMUTEX_UNOWNED))
		return (0);
	return (__thr_umutex_unlock(mtx, id));
}

static inline int
_thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{
	return (_thr_umutex_unlock2(mtx, id, NULL));
}

static inline int
_thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags)
{
	int32_t state;
	int32_t wrflags;

	if (flags & URWLOCK_PREFER_READER ||
	    rwlock->rw_flags & URWLOCK_PREFER_READER)
		wrflags = URWLOCK_WRITE_OWNER;
	else
		wrflags = URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS;
	state = rwlock->rw_state;
	while (!(state & wrflags)) {
		if (__predict_false(URWLOCK_READER_COUNT(state) ==
		    URWLOCK_MAX_READERS))
			return (EAGAIN);
		if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state + 1))
			return (0);
		state = rwlock->rw_state;
	}

	return (EBUSY);
}

static inline int
_thr_rwlock_trywrlock(struct urwlock *rwlock)
{
	int32_t state;

	state = rwlock->rw_state;
	while (!(state & URWLOCK_WRITE_OWNER) &&
	    URWLOCK_READER_COUNT(state) == 0) {
		if (atomic_cmpset_acq_32(&rwlock->rw_state, state,
		    state | URWLOCK_WRITE_OWNER))
			return (0);
		state = rwlock->rw_state;
	}

	return (EBUSY);
}

static inline int
_thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
{
	if (_thr_rwlock_tryrdlock(rwlock, flags) == 0)
		return (0);
	return (__thr_rwlock_rdlock(rwlock, flags, tsp));
}
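/*
 * As with the read path above, the write-lock wrapper tries the
 * userland atomic first.  Unlock must take the __thr_rwlock_unlock()
 * slow path whenever threads may be blocked in the kernel: for a
 * writer, the compare-and-set below fails if any waiter bit is set;
 * for the last reader, the waiter bits are checked explicitly.
 */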
static inline int
_thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
{
	if (_thr_rwlock_trywrlock(rwlock) == 0)
		return (0);
	return (__thr_rwlock_wrlock(rwlock, tsp));
}

static inline int
_thr_rwlock_unlock(struct urwlock *rwlock)
{
	int32_t state;

	state = rwlock->rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (atomic_cmpset_rel_32(&rwlock->rw_state,
		    URWLOCK_WRITE_OWNER, 0))
			return (0);
	} else {
		for (;;) {
			if (__predict_false(URWLOCK_READER_COUNT(state) == 0))
				return (EPERM);
			if (!((state & (URWLOCK_WRITE_WAITERS |
			    URWLOCK_READ_WAITERS)) &&
			    URWLOCK_READER_COUNT(state) == 1)) {
				if (atomic_cmpset_rel_32(&rwlock->rw_state,
				    state, state - 1))
					return (0);
				state = rwlock->rw_state;
			} else {
				break;
			}
		}
	}
	return (__thr_rwlock_unlock(rwlock));
}

#endif /* !_THR_FBSD_UMTX_H_ */