/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

/* maximum number of times a read lock may be obtained */
#define	MAX_READ_LOCKS		(INT_MAX - 1)

__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

/*
 * Prototypes
 */

static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t prwlock;
	int ret;

	/* allocate rwlock object */
	prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));

	if (prwlock == NULL)
		return (ENOMEM);

	/* initialize the lock */
	if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
		free(prwlock);
	else {
		/* initialize the read condition signal */
		ret = _pthread_cond_init(&prwlock->read_signal, NULL);

		if (ret != 0) {
			_pthread_mutex_destroy(&prwlock->lock);
			free(prwlock);
		} else {
			/* initialize the write condition signal */
			ret = _pthread_cond_init(&prwlock->write_signal, NULL);

			if (ret != 0) {
				_pthread_cond_destroy(&prwlock->read_signal);
				_pthread_mutex_destroy(&prwlock->lock);
				free(prwlock);
			} else {
				/* success */
				prwlock->state = 0;
				prwlock->blocked_writers = 0;
				*rwlock = prwlock;
			}
		}
	}

	return (ret);
}

int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
	int ret;

	if (rwlock == NULL)
		ret = EINVAL;
	else {
		pthread_rwlock_t prwlock;

		prwlock = *rwlock;

		_pthread_mutex_destroy(&prwlock->lock);
		_pthread_cond_destroy(&prwlock->read_signal);
		_pthread_cond_destroy(&prwlock->write_signal);
		free(prwlock);

		*rwlock = NULL;

		ret = 0;
	}
	return (ret);
}

static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == NULL)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

int
_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

static int
rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	/* check lock count */
	if (prwlock->state == MAX_READ_LOCKS) {
		_pthread_mutex_unlock(&prwlock->lock);
		return (EAGAIN);
	}

	curthread = _get_curthread();
	if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		;	/* nothing needed */
	} else {
		/* give writers priority over readers */
		while (prwlock->blocked_writers || prwlock->state < 0) {
			if (abstime)
				ret = _pthread_cond_timedwait
				    (&prwlock->read_signal,
				    &prwlock->lock, abstime);
			else
				ret = _pthread_cond_wait(&prwlock->read_signal,
				    &prwlock->lock);
			if (ret != 0) {
				/* can't do a whole lot if this fails */
				_pthread_mutex_unlock(&prwlock->lock);
				return (ret);
			}
		}
	}

	curthread->rdlock_count++;
	prwlock->state++; /* indicate we are locked for reading */

	/*
	 * Something is really wrong if this call fails.  Returning
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
	 const struct timespec *abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}

int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	curthread = _get_curthread();
	if (prwlock->state == MAX_READ_LOCKS)
		ret = EAGAIN;
	else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/* see comment for pthread_rwlock_rdlock() */
		curthread->rdlock_count++;
		prwlock->state++;
	}
	/* give writers priority over readers */
	else if (prwlock->blocked_writers || prwlock->state < 0)
		ret = EBUSY;
	else {
		curthread->rdlock_count++;
		prwlock->state++; /* indicate we are locked for reading */
	}

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	if (prwlock->state != 0)
		ret = EBUSY;
	else
		/* indicate we are locked for writing */
		prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	if (prwlock == NULL)
		return (EINVAL);

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	curthread = _get_curthread();
	if (prwlock->state > 0) {
		curthread->rdlock_count--;
		prwlock->state--;
		if (prwlock->state == 0 && prwlock->blocked_writers)
			ret = _pthread_cond_signal(&prwlock->write_signal);
	} else if (prwlock->state < 0) {
		prwlock->state = 0;

		if (prwlock->blocked_writers)
			ret = _pthread_cond_signal(&prwlock->write_signal);
		else
			ret = _pthread_cond_broadcast(&prwlock->read_signal);
	} else
		ret = EINVAL;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	while (prwlock->state != 0) {
		prwlock->blocked_writers++;

		if (abstime != NULL)
			ret = _pthread_cond_timedwait(&prwlock->write_signal,
			    &prwlock->lock, abstime);
		else
			ret = _pthread_cond_wait(&prwlock->write_signal,
			    &prwlock->lock);
		if (ret != 0) {
			prwlock->blocked_writers--;
			_pthread_mutex_unlock(&prwlock->lock);
			return (ret);
		}

		prwlock->blocked_writers--;
	}

	/* indicate we are locked for writing */
	prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common (rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
	 const struct timespec *abstime)
{
	return (rwlock_wrlock_common (rwlock, abstime));
}
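
/*
 * A minimal usage sketch of the interfaces implemented above: one reader
 * and one writer contend for the same lock, so the reader either runs
 * before the writer takes the lock or waits until the writer releases it.
 * Only standard pthread_rwlock_* calls are used; the RWLOCK_EXAMPLE guard
 * is a placeholder macro so the sketch never takes part in a normal build
 * of this file.
 */
#ifdef RWLOCK_EXAMPLE
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t example_lock;
static int shared_counter;

static void *
reader(void *arg)
{
	/* Any number of readers may hold the lock concurrently. */
	pthread_rwlock_rdlock(&example_lock);
	printf("observed %d\n", shared_counter);
	pthread_rwlock_unlock(&example_lock);
	return (NULL);
}

static void *
writer(void *arg)
{
	/* A writer is admitted only when no reader or writer holds the lock. */
	pthread_rwlock_wrlock(&example_lock);
	shared_counter++;
	pthread_rwlock_unlock(&example_lock);
	return (NULL);
}

int
main(void)
{
	pthread_t r, w;

	pthread_rwlock_init(&example_lock, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(r, NULL);
	pthread_join(w, NULL);
	pthread_rwlock_destroy(&example_lock);
	return (0);
}
#endif /* RWLOCK_EXAMPLE */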