/*-
 * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation assures deterministic lock
 * granting behavior, so that slocks and xlocks are interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
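 *
 * A minimal usage sketch (illustrative caller code, not part of this file;
 * it assumes the sx_slock()/sx_xlock() wrapper macros and the sx_assert()
 * macro provided by <sys/sx.h>):
 *
 *	static struct sx foo_lock;
 *
 *	sx_init(&foo_lock, "foo lock");
 *	sx_slock(&foo_lock);			read shared data
 *	sx_sunlock(&foo_lock);
 *	sx_xlock(&foo_lock);			modify shared data
 *	sx_assert(&foo_lock, SX_XLOCKED);
 *	sx_xunlock(&foo_lock);
 *	sx_destroy(&foo_lock);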
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/linker_set.h>
#include <sys/condvar.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/lock_profile.h>

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_sx(struct lock_object *lock);
#endif

struct lock_class lock_class_sx = {
	"sx",
	LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
	db_show_sx
#endif
};

#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif

void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init(sargs->sa_sx, sargs->sa_desc);
}

void
sx_init(struct sx *sx, const char *description)
{

	sx->sx_lock = mtx_pool_find(mtxpool_lockbuilder, sx);
	sx->sx_cnt = 0;
	cv_init(&sx->sx_shrd_cv, description);
	sx->sx_shrd_wcnt = 0;
	cv_init(&sx->sx_excl_cv, description);
	sx->sx_excl_wcnt = 0;
	sx->sx_xholder = NULL;
	lock_profile_object_init(&sx->sx_object, &lock_class_sx, description);
	lock_init(&sx->sx_object, &lock_class_sx, description, NULL,
	    LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE);
}

void
sx_destroy(struct sx *sx)
{

	KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
	    0), ("%s (%s): holders or waiters\n", __func__,
	    sx->sx_object.lo_name));

	sx->sx_lock = NULL;
	cv_destroy(&sx->sx_shrd_cv);
	cv_destroy(&sx->sx_excl_cv);

	lock_profile_object_destroy(&sx->sx_object);
	lock_destroy(&sx->sx_object);
}

void
_sx_slock(struct sx *sx, const char *file, int line)
{
	uint64_t waittime = 0;
	int contested;

	mtx_lock(sx->sx_lock);
	KASSERT(sx->sx_xholder != curthread,
	    ("%s (%s): slock while xlock is held @ %s:%d\n", __func__,
	    sx->sx_object.lo_name, file, line));
	WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER, file, line);

	/*
	 * Loop in case we lose the race for lock acquisition.
	 */
	if (sx->sx_cnt < 0)
		lock_profile_waitstart(&waittime);
	while (sx->sx_cnt < 0) {
		sx->sx_shrd_wcnt++;
		lock_profile_obtain_lock_failed(&sx->sx_object, &contested);
		cv_wait(&sx->sx_shrd_cv, sx->sx_lock);
		sx->sx_shrd_wcnt--;
	}

	/* Acquire a shared lock. */
	sx->sx_cnt++;

	if (sx->sx_cnt == 1)
		lock_profile_obtain_lock_success(&sx->sx_object, waittime, file, line);

	LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->sx_object, 0, file, line);
	curthread->td_locks++;

	mtx_unlock(sx->sx_lock);
}

int
_sx_try_slock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);
	if (sx->sx_cnt >= 0) {
		sx->sx_cnt++;
		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->sx_object, LOP_TRYLOCK, file, line);
		curthread->td_locks++;
		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}

void
_sx_xlock(struct sx *sx, const char *file, int line)
{
	int contested;
	uint64_t waittime = 0;

	mtx_lock(sx->sx_lock);

	/*
	 * With sx locks, we're absolutely not permitted to recurse on
	 * xlocks, as it is fatal (deadlock).
	 * Normally, recursion is handled by WITNESS, but as it is not
	 * semantically correct to hold the xlock while in here, we
	 * consider it API abuse and put it under INVARIANTS.
	 */
	KASSERT(sx->sx_xholder != curthread,
	    ("%s (%s): xlock already held @ %s:%d", __func__,
	    sx->sx_object.lo_name, file, line));
	WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);

	if (sx->sx_cnt)
		lock_profile_waitstart(&waittime);
	/* Loop in case we lose the race for lock acquisition. */
	while (sx->sx_cnt != 0) {
		sx->sx_excl_wcnt++;
		lock_profile_obtain_lock_failed(&sx->sx_object, &contested);
		cv_wait(&sx->sx_excl_cv, sx->sx_lock);
		sx->sx_excl_wcnt--;
	}

	MPASS(sx->sx_cnt == 0);

	/* Acquire an exclusive lock. */
	sx->sx_cnt--;
	sx->sx_xholder = curthread;

	lock_profile_obtain_lock_success(&sx->sx_object, waittime, file, line);
	LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;

	mtx_unlock(sx->sx_lock);
}

int
_sx_try_xlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);
	if (sx->sx_cnt == 0) {
		sx->sx_cnt--;
		sx->sx_xholder = curthread;
		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
		    line);
		curthread->td_locks++;
		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_SLOCKED, file, line);
	mtx_lock(sx->sx_lock);

	curthread->td_locks--;
	WITNESS_UNLOCK(&sx->sx_object, 0, file, line);

	/* Release. */
	sx->sx_cnt--;

	if (sx->sx_cnt == 0)
		lock_profile_release_lock(&sx->sx_object);
	/*
	 * If we just released the last shared lock, wake any waiters up, giving
	 * exclusive lockers precedence.  In order to make sure that exclusive
	 * lockers won't be blocked forever, don't wake shared lock waiters if
	 * there are exclusive lock waiters.
	 */
	if (sx->sx_excl_wcnt > 0) {
		if (sx->sx_cnt == 0)
			cv_signal(&sx->sx_excl_cv);
	} else if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_XLOCKED, file, line);
	mtx_lock(sx->sx_lock);
	MPASS(sx->sx_cnt == -1);

	curthread->td_locks--;
	WITNESS_UNLOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);

	/* Release. */
	sx->sx_cnt++;
	sx->sx_xholder = NULL;

	lock_profile_release_lock(&sx->sx_object);
	/*
	 * Wake up waiters if there are any.  Give precedence to slock waiters.
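	 * Preferring slock waiters here, while _sx_sunlock() prefers xlock
	 * waiters, is what interleaves slocks and xlocks as described at
	 * the top of this file.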
	 */
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);
	else if (sx->sx_excl_wcnt > 0)
		cv_signal(&sx->sx_excl_cv);

	LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
}

int
_sx_try_upgrade(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_SLOCKED, file, line);
	mtx_lock(sx->sx_lock);

	if (sx->sx_cnt == 1) {
		sx->sx_cnt = -1;
		sx->sx_xholder = curthread;

		LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 1, file, line);
		WITNESS_UPGRADE(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);

		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}

void
_sx_downgrade(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_XLOCKED, file, line);
	mtx_lock(sx->sx_lock);
	MPASS(sx->sx_cnt == -1);

	WITNESS_DOWNGRADE(&sx->sx_object, 0, file, line);

	sx->sx_cnt = 1;
	sx->sx_xholder = NULL;
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	LOCK_LOG_LOCK("XDOWNGRADE", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(struct sx *sx, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case SX_LOCKED:
	case SX_SLOCKED:
#ifdef WITNESS
		witness_assert(&sx->sx_object, what, file, line);
#else
		mtx_lock(sx->sx_lock);
		if (sx->sx_cnt <= 0 &&
		    (what == SX_SLOCKED || sx->sx_xholder != curthread))
			panic("Lock %s not %slocked @ %s:%d\n",
			    sx->sx_object.lo_name, (what == SX_SLOCKED) ?
			    "share " : "", file, line);
		mtx_unlock(sx->sx_lock);
#endif
		break;
	case SX_XLOCKED:
		mtx_lock(sx->sx_lock);
		if (sx->sx_xholder != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    sx->sx_object.lo_name, file, line);
		mtx_unlock(sx->sx_lock);
		break;
	case SX_UNLOCKED:
#ifdef WITNESS
		witness_assert(&sx->sx_object, what, file, line);
#else
		/*
		 * We are able to check only the exclusive lock here;
		 * we cannot assert that *this* thread holds an slock.
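		 * Shared ownership is tracked only as a count (sx_cnt),
		 * not per thread, so an slock held by curthread goes
		 * undetected without WITNESS.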
		 */
		mtx_lock(sx->sx_lock);
		if (sx->sx_xholder == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    sx->sx_object.lo_name, file, line);
		mtx_unlock(sx->sx_lock);
#endif
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_sx(struct lock_object *lock)
{
	struct thread *td;
	struct sx *sx;

	sx = (struct sx *)lock;

	db_printf(" state: ");
	if (sx->sx_cnt < 0) {
		td = sx->sx_xholder;
		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
	} else if (sx->sx_cnt > 0)
		db_printf("SLOCK: %d locks\n", sx->sx_cnt);
	else
		db_printf("UNLOCKED\n");
	db_printf(" waiters: %d shared, %d exclusive\n", sx->sx_shrd_wcnt,
	    sx->sx_excl_wcnt);
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
	struct sx *sx;
	struct cv *cv;

	/*
	 * First, see if it looks like td is blocked on a condition
	 * variable.
	 */
	cv = td->td_wchan;
	if (cv->cv_description != td->td_wmesg)
		return (0);

	/*
	 * Next, see if it looks like td is blocked on the exclusive
	 * condition variable.
	 */
	sx = (struct sx *)((char *)cv - offsetof(struct sx, sx_excl_cv));
	if (LOCK_CLASS(&sx->sx_object) == &lock_class_sx &&
	    sx->sx_excl_wcnt > 0)
		goto ok;

	/*
	 * Finally, see if it looks like td is blocked on the shared
	 * condition variable.
	 */
	sx = (struct sx *)((char *)cv - offsetof(struct sx, sx_shrd_cv));
	if (LOCK_CLASS(&sx->sx_object) == &lock_class_sx &&
	    sx->sx_shrd_wcnt > 0)
		goto ok;

	/* Doesn't seem to be an sx lock. */
	return (0);

ok:
	/* We think we have an sx lock, so output some details. */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	if (sx->sx_cnt >= 0) {
		db_printf("SLOCK (count %d)\n", sx->sx_cnt);
		*ownerp = NULL;
	} else {
		db_printf("XLOCK\n");
		*ownerp = sx->sx_xholder;
	}
	return (1);
}
#endif