/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
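
/*
 * Thin wrappers around the _umtx_op(2) system call that back libthr's
 * low-level mutex, condition variable, rwlock and sleep/wakeup
 * primitives.  Simple mutexes are acquired in userland with an atomic
 * compare-and-set when possible; contested locks and priority-protected
 * or priority-inheritance mutexes are handled by the kernel.
 */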

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "thr_private.h"
#include "thr_umtx.h"

#ifndef HAS__UMTX_OP_ERR
/*
 * Fallback when no direct _umtx_op_err() wrapper is available: convert
 * the -1/errno convention of _umtx_op() into a plain error-number return.
 */
int
_umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
{

	if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
		return (errno);
	return (0);
}
#endif

void
_thr_umutex_init(struct umutex *mtx)
{
	static const struct umutex default_mtx = DEFAULT_UMUTEX;

	*mtx = default_mtx;
}

void
_thr_urwlock_init(struct urwlock *rwl)
{
	static const struct urwlock default_rwl = DEFAULT_URWLOCK;

	*rwl = default_rwl;
}

/*
 * Acquire a umutex.  A simple mutex is taken with an atomic
 * compare-and-set and only enters the kernel to sleep when contested;
 * priority-protected and priority-inheritance mutexes are always locked
 * by the kernel.
 */
int
__thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
		return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));

	for (;;) {
		owner = mtx->m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0 &&
		    atomic_cmpset_acq_32(&mtx->m_owner, owner, id | owner))
			return (0);
		/* Take over a robust mutex whose owner died. */
		if (owner == UMUTEX_RB_OWNERDEAD &&
		    atomic_cmpset_acq_32(&mtx->m_owner, owner,
		    id | UMUTEX_CONTESTED))
			return (EOWNERDEAD);
		if (owner == UMUTEX_RB_NOTRECOV)
			return (ENOTRECOVERABLE);

		/* wait in kernel */
		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
	}
}

#define SPINLOOPS 1000

/*
 * Adaptive variant of __thr_umutex_lock(): on SMP systems spin for up
 * to SPINLOOPS iterations before sleeping in the kernel.
 */
int
__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;
	int count;

	if (!_thr_is_smp)
		return (__thr_umutex_lock(mtx, id));
	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
		return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));

	for (;;) {
		count = SPINLOOPS;
		while (count--) {
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | owner))
				return (0);
			if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | UMUTEX_CONTESTED))
				return (EOWNERDEAD);
			if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
				return (ENOTRECOVERABLE);
			CPU_SPINWAIT;
		}

		/* wait in kernel */
		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
	}
}

/*
 * Timed lock.  The timeout, if any, is absolute and measured against
 * CLOCK_REALTIME.
 */
int
__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
    const struct timespec *abstime)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;
	uint32_t owner;
	int ret;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = CLOCK_REALTIME;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	for (;;) {
		if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
		    UMUTEX_PRIO_INHERIT)) == 0) {
			/* try to lock it */
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | owner))
				return (0);
			if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | UMUTEX_CONTESTED))
				return (EOWNERDEAD);
			if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
				return (ENOTRECOVERABLE);
			/* wait in kernel */
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0,
			    (void *)tm_size, __DECONST(void *, tm_p));
		} else {
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0,
			    (void *)tm_size, __DECONST(void *, tm_p));
			if (ret == 0 || ret == EOWNERDEAD ||
			    ret == ENOTRECOVERABLE)
				break;
		}
		if (ret == ETIMEDOUT)
			break;
	}
	return (ret);
}

int
__thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{

	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0));
}

int
__thr_umutex_trylock(struct umutex *mtx)
{

	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0));
}

int
__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
    uint32_t *oldceiling)
{

	return (_umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0));
}

int
_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
{

	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return (_umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
	    __DECONST(void *, timeout)));
}

int
_thr_umtx_wait_uint(volatile u_int *mtx, u_int id,
    const struct timespec *timeout, int shared)
{

	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
	    UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
	    __DECONST(void *, timeout)));
}

int
_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
    const struct timespec *abstime, int shared)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = clockid;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
	    UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id,
	    (void *)tm_size, __DECONST(void *, tm_p)));
}

int
_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
{

	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
	    UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, nr_wakeup, 0, 0));
}

void
_thr_ucond_init(struct ucond *cv)
{

	bzero(cv, sizeof(struct ucond));
}

/*
 * Wait on the condition variable; the kernel atomically releases the
 * mutex m and puts the thread to sleep on cv.  A relative timeout that
 * has already expired likewise releases the mutex before returning
 * ETIMEDOUT.
 */
int
_thr_ucond_wait(struct ucond *cv, struct umutex *m,
    const struct timespec *timeout, int flags)
{
	struct pthread *curthread;

	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0))) {
		curthread = _get_curthread();
		_thr_umutex_unlock(m, TID(curthread));
		return (ETIMEDOUT);
	}
	return (_umtx_op_err(cv, UMTX_OP_CV_WAIT, flags, m,
	    __DECONST(void *, timeout)));
}

int
_thr_ucond_signal(struct ucond *cv)
{

	/* Avoid the system call when nobody is waiting. */
	if (!cv->c_has_waiters)
		return (0);
	return (_umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL));
}

int
_thr_ucond_broadcast(struct ucond *cv)
{

	if (!cv->c_has_waiters)
		return (0);
	return (_umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL));
}

int
__thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
    const struct timespec *tsp)
{
	struct _umtx_time timeout, *tm_p;
	size_t tm_size;

	if (tsp == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._timeout = *tsp;
		timeout._flags = UMTX_ABSTIME;
		timeout._clockid = CLOCK_REALTIME;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}
	return (_umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags,
	    (void *)tm_size, tm_p));
}

int
__thr_rwlock_wrlock(struct urwlock *rwlock, const struct timespec *tsp)
{
	struct _umtx_time timeout, *tm_p;
	size_t tm_size;

	if (tsp == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._timeout = *tsp;
		timeout._flags = UMTX_ABSTIME;
		timeout._clockid = CLOCK_REALTIME;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}
	return (_umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, (void *)tm_size,
	    tm_p));
}

int
__thr_rwlock_unlock(struct urwlock *rwlock)
{

	return (_umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL));
}

/*
 * Internal rwlock helpers used by libthr itself: the lock routines retry
 * on EINTR; any other failure is fatal.
 */
void
_thr_rwl_rdlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
			return;
		ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("rdlock error");
	}
}

void
_thr_rwl_wrlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_trywrlock(rwlock) == 0)
			return;
		ret = __thr_rwlock_wrlock(rwlock, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("wrlock error");
	}
}

void
_thr_rwl_unlock(struct urwlock *rwlock)
{

	if (_thr_rwlock_unlock(rwlock))
		PANIC("unlock error");
}