1 /*- 2 * Copyright (c) 2005 David Xu <davidxu@freebsd.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
 *
 * $FreeBSD$
 */

/*
 * Userland wrappers around the kernel umtx(2) primitives used by libthr.
 *
 * Each lock operation below attempts a single-atomic-op "fast path" inline;
 * on contention it falls back to the out-of-line __thr_* slow-path functions
 * (defined elsewhere in libthr), which invoke the kernel.
 */

#ifndef _THR_FBSD_UMTX_H_
#define _THR_FBSD_UMTX_H_

#include <strings.h>
#include <sys/umtx.h>

/* Static initializer for a struct umutex: all fields zero (unowned, no flags). */
#define DEFAULT_UMUTEX {0,0, {0,0},{0,0,0,0}}

/*
 * Slow-path mutex operations, implemented out of line (not visible in this
 * header).  __hidden presumably restricts symbol visibility to the library —
 * defined by libthr's namespace headers; confirm there.
 */
int __thr_umutex_lock(struct umutex *mtx) __hidden;
int __thr_umutex_timedlock(struct umutex *mtx,
	const struct timespec *timeout) __hidden;
int __thr_umutex_unlock(struct umutex *mtx) __hidden;
int __thr_umutex_trylock(struct umutex *mtx) __hidden;
int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
	uint32_t *oldceiling) __hidden;

/* Initialization, wait/wake, and condition-variable slow paths. */
void _thr_umutex_init(struct umutex *mtx) __hidden;
int _thr_umtx_wait(volatile long *mtx, long exp,
	const struct timespec *timeout) __hidden;
int _thr_umtx_wait_uint(volatile u_int *mtx, u_int exp,
	const struct timespec *timeout) __hidden;
int _thr_umtx_wake(volatile void *mtx, int count) __hidden;
/* NOTE(review): "check_unpaking" looks like a typo for "check_unparking" —
 * verify against the definition before renaming. */
int _thr_ucond_wait(struct ucond *cv, struct umutex *m,
	const struct timespec *timeout, int check_unpaking) __hidden;
void _thr_ucond_init(struct ucond *cv) __hidden;
int _thr_ucond_signal(struct ucond *cv) __hidden;
int _thr_ucond_broadcast(struct ucond *cv) __hidden;

/* Slow-path rwlock operations (out of line). */
int __thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp) __hidden;
int __thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp) __hidden;
int __thr_rwlock_unlock(struct urwlock *rwlock) __hidden;

/*
 * Try to lock mtx for thread `id` without blocking.
 * Fast path: one acquire compare-and-set of m_owner from UMUTEX_UNOWNED to id.
 * Priority-protected mutexes must go through the kernel even for trylock
 * (the ceiling is managed there); all others simply report EBUSY.
 * Returns 0 on success, EBUSY (or the slow path's error) on failure.
 */
static inline int
_thr_umutex_trylock(struct umutex *mtx, uint32_t id)
{
	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
		return (0);
	if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EBUSY);
	return (__thr_umutex_trylock(mtx));
}

/*
 * Pure userland trylock: a single acquire compare-and-set, never enters
 * the kernel.  Returns 0 on success, EBUSY otherwise.
 */
static inline int
_thr_umutex_trylock2(struct umutex *mtx, uint32_t id)
{
	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
		return (0);
	return (EBUSY);
}

/*
 * Lock mtx for thread `id`, blocking if necessary.
 * Fast path is the uncontested compare-and-set; on contention the kernel
 * slow path __thr_umutex_lock() sleeps until the lock is acquired.
 */
static inline int
_thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
		return (0);
	return (__thr_umutex_lock(mtx));
}

/*
 * Like _thr_umutex_lock() but the slow path gives up after `timeout`
 * (semantics of the timeout are defined by __thr_umutex_timedlock()).
 */
static inline int
_thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *timeout)
{
	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
		return (0);
	return (__thr_umutex_timedlock(mtx, timeout));
}

/*
 * Unlock mtx held by thread `id`.
 * Fast path: release compare-and-set of m_owner back to UMUTEX_UNOWNED;
 * it fails when m_owner != id (e.g. contention bits set), in which case the
 * kernel slow path handles waking waiters.
 */
static inline int
_thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{
	if (atomic_cmpset_rel_32(&mtx->m_owner, id, UMUTEX_UNOWNED))
		return (0);
	return (__thr_umutex_unlock(mtx));
}

/*
 * Non-blocking read lock.
 * In reader-preferring mode only an active writer blocks readers; otherwise
 * pending writers (URWLOCK_WRITE_WAITERS) block new readers too, to avoid
 * writer starvation.  The reader count lives in the low bits of rw_state and
 * is bumped with an acquire compare-and-set; the loop retries only when the
 * CAS loses a race, not when a blocking condition appears.
 * Returns 0 on success, EAGAIN if the reader count is saturated, EBUSY if
 * blocked by a writer.
 */
static inline int
_thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags)
{
	int32_t state;
	int32_t wrflags;

	if (flags & URWLOCK_PREFER_READER || rwlock->rw_flags & URWLOCK_PREFER_READER)
		wrflags = URWLOCK_WRITE_OWNER;
	else
		wrflags = URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS;
	state = rwlock->rw_state;
	while (!(state & wrflags)) {
		if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS))
			return (EAGAIN);
		if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state + 1))
			return (0);
		state = rwlock->rw_state;
	}

	return (EBUSY);
}

/*
 * Non-blocking write lock: succeeds only when there is no write owner and
 * no active readers, by atomically setting URWLOCK_WRITE_OWNER with acquire
 * semantics.  Retries only on a lost CAS race; returns EBUSY when the lock
 * is held.
 */
static inline int
_thr_rwlock_trywrlock(struct urwlock *rwlock)
{
	int32_t state;

	state = rwlock->rw_state;
	while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
		if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state | URWLOCK_WRITE_OWNER))
			return (0);
		state = rwlock->rw_state;
	}

	return (EBUSY);
}

/*
 * Blocking read lock: try the userland fast path first, then fall back to
 * the kernel with an optional timeout (tsp may be NULL per the slow path's
 * contract — defined elsewhere).
 */
static inline int
_thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
{
	if (_thr_rwlock_tryrdlock(rwlock, flags) == 0)
		return (0);
	return (__thr_rwlock_rdlock(rwlock, flags, tsp));
}

/*
 * Blocking write lock: userland fast path, then kernel slow path with an
 * optional timeout.
 */
static inline int
_thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
{
	if (_thr_rwlock_trywrlock(rwlock) == 0)
		return (0);
	return (__thr_rwlock_wrlock(rwlock, tsp));
}

/*
 * Release a read or write lock.
 * Write side: release-CAS from exactly URWLOCK_WRITE_OWNER (no waiter bits)
 * to 0; if waiter bits are set the CAS fails and the kernel must wake them.
 * Read side: decrement the reader count with a release CAS, except when this
 * is the last reader and writers are waiting — then break out to the kernel
 * slow path so the waiters get woken.  Returns EPERM if called with no
 * readers, otherwise 0 or the slow path's result.
 */
static inline int
_thr_rwlock_unlock(struct urwlock *rwlock)
{
	int32_t state;

	state = rwlock->rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (atomic_cmpset_rel_32(&rwlock->rw_state, URWLOCK_WRITE_OWNER, 0))
			return (0);
	} else {
		for (;;) {
			if (__predict_false(URWLOCK_READER_COUNT(state) == 0))
				return (EPERM);
			if (!((state & URWLOCK_WRITE_WAITERS) && URWLOCK_READER_COUNT(state) == 1)) {
				if (atomic_cmpset_rel_32(&rwlock->rw_state, state, state-1))
					return (0);
				state = rwlock->rw_state;
			} else {
				break;
			}
		}
	}
	return (__thr_rwlock_unlock(rwlock));
}
#endif