/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

_Static_assert(sizeof(struct pthread_rwlock) <= THR_PAGE_SIZE_MIN,
    "pthread_rwlock is too large for off-page");

__weak_reference(_thr_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_thr_rwlock_destroy, _pthread_rwlock_destroy);
__weak_reference(_thr_rwlock_init, pthread_rwlock_init);
__weak_reference(_thr_rwlock_init, _pthread_rwlock_init);
__weak_reference(_Tthr_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_Tthr_rwlock_rdlock, _pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_Tthr_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_Tthr_rwlock_tryrdlock, _pthread_rwlock_tryrdlock);
__weak_reference(_Tthr_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_Tthr_rwlock_trywrlock, _pthread_rwlock_trywrlock);
__weak_reference(_Tthr_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_Tthr_rwlock_unlock, _pthread_rwlock_unlock);
__weak_reference(_Tthr_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_Tthr_rwlock_wrlock, _pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock);
static int init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out);

static int __always_inline
check_and_init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
	if (__predict_false(*rwlock == THR_PSHARED_PTR ||
	    *rwlock <= THR_RWLOCK_DESTROYED))
		return (init_rwlock(rwlock, rwlock_out));
	*rwlock_out = *rwlock;
	return (0);
}

static int __noinline
init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
	pthread_rwlock_t prwlock;
	int ret;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else if ((prwlock = *rwlock) <= THR_RWLOCK_DESTROYED) {
		if (prwlock == THR_RWLOCK_INITIALIZER) {
			ret = init_static(_get_curthread(), rwlock);
			if (ret != 0)
				return (ret);
		} else if (prwlock == THR_RWLOCK_DESTROYED) {
			return (EINVAL);
		}
		prwlock = *rwlock;
	}
	*rwlock_out = prwlock;
	return (0);
}

static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t prwlock;

	if (attr == NULL || *attr == NULL ||
	    (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
		prwlock = aligned_alloc(CACHE_LINE_SIZE,
		    roundup(sizeof(struct pthread_rwlock), CACHE_LINE_SIZE));
		if (prwlock == NULL)
			return (ENOMEM);
		memset(prwlock, 0, sizeof(struct pthread_rwlock));
		*rwlock = prwlock;
	} else {
		prwlock = __thr_pshared_offpage(rwlock, 1);
		if (prwlock == NULL)
			return (EFAULT);
		prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
		*rwlock = THR_PSHARED_PTR;
	}
	return (0);
}
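
/*
 * Illustrative sketch, not part of the original file: rwlock_init() above
 * keeps a process-shared lock off-page and stores THR_PSHARED_PTR in the
 * caller-visible object.  That machinery is invisible to applications,
 * which request a process-shared lock through the standard attribute
 * interface, e.g. (error handling omitted):
 *
 *	#include <sys/mman.h>
 *	#include <pthread.h>
 *
 *	pthread_rwlockattr_t attr;
 *	pthread_rwlock_t *rw;
 *
 *	rw = mmap(NULL, sizeof(*rw), PROT_READ | PROT_WRITE,
 *	    MAP_SHARED | MAP_ANON, -1, 0);
 *	pthread_rwlockattr_init(&attr);
 *	pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	pthread_rwlock_init(rw, &attr);		// takes the pshared path above
 *	pthread_rwlockattr_destroy(&attr);
 */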

int
_thr_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	prwlock = *rwlock;
	if (prwlock == THR_RWLOCK_INITIALIZER)
		ret = 0;
	else if (prwlock == THR_RWLOCK_DESTROYED)
		ret = EINVAL;
	else if (prwlock == THR_PSHARED_PTR) {
		*rwlock = THR_RWLOCK_DESTROYED;
		__thr_pshared_destroy(rwlock);
		ret = 0;
	} else {
		*rwlock = THR_RWLOCK_DESTROYED;
		free(prwlock);
		ret = 0;
	}
	return (ret);
}

static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == THR_RWLOCK_INITIALIZER)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

int
_thr_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{

	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	/*
	 * POSIX says the validity of the abstime parameter need not be
	 * checked if the lock can be acquired immediately.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* go to the kernel and lock it */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			break;
		}
	}
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}
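
/*
 * Illustrative sketch, not part of the original file: the rdlock_count
 * bookkeeping above means the following pattern cannot deadlock even
 * when a writer is queued.  Thread A takes a read lock, thread B blocks
 * in pthread_rwlock_wrlock(), and thread A then asks for a second,
 * recursive read lock.  Because A already holds a read lock, the second
 * request is made with URWLOCK_PREFER_READER and succeeds instead of
 * queueing behind the blocked writer:
 *
 *	static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
 *
 *	// Thread A
 *	pthread_rwlock_rdlock(&lock);	// rdlock_count becomes 1
 *	// ... thread B blocks in pthread_rwlock_wrlock(&lock) ...
 *	pthread_rwlock_rdlock(&lock);	// reader preference: no deadlock
 *	pthread_rwlock_unlock(&lock);
 *	pthread_rwlock_unlock(&lock);
 */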

int
_Tthr_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}

int
_Tthr_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_Tthr_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = TID(curthread);
	return (ret);
}

static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	/*
	 * POSIX says the validity of the abstime parameter need not be
	 * checked if the lock can be acquired immediately.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = TID(curthread);
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* go to the kernel and lock it */
		ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
		if (ret == 0) {
			prwlock->owner = TID(curthread);
			break;
		}

		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = TID(curthread);
			break;
		}
	}
	return (ret);
}

int
_Tthr_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
	return (rwlock_wrlock_common(rwlock, abstime));
}

int
_Tthr_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else {
		prwlock = *rwlock;
	}

	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
		return (EINVAL);

	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != TID(curthread)))
			return (EPERM);
		prwlock->owner = 0;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}
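
/*
 * Illustrative sketch, not part of the original file: the timed entry
 * points take an absolute CLOCK_REALTIME deadline, and as the comments
 * above note, the timespec is only validated after the fast userland
 * attempt fails.  A caller willing to wait at most two seconds for the
 * write lock might do:
 *
 *	#include <errno.h>
 *	#include <time.h>
 *
 *	pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
 *	struct timespec abstime;
 *	int error;
 *
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 2;
 *	error = pthread_rwlock_timedwrlock(&lock, &abstime);
 *	if (error == ETIMEDOUT)
 *		;	// deadline passed without acquiring the lock
 *
 * If the lock is free, the call succeeds even when abstime is malformed;
 * otherwise a tv_nsec outside [0, 1000000000) yields EINVAL.
 */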