/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
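
/*
 * Typical consumer usage, as a sketch (the lock and data names here are
 * hypothetical, not part of this file):
 *
 *	static struct sx foo_lock;
 *	sx_init(&foo_lock, "foo");
 *
 *	sx_slock(&foo_lock);		readers share the lock
 *	... read foo ...
 *	sx_sunlock(&foo_lock);
 *
 *	sx_xlock(&foo_lock);		writers hold it exclusively
 *	... modify foo ...
 *	sx_xunlock(&foo_lock);
 */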

#include "opt_adaptive_sx.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/systm.h>

#ifdef ADAPTIVE_SX
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if !defined(SMP) && defined(ADAPTIVE_SX)
#error "You must have SMP to enable the ADAPTIVE_SX option"
#endif

CTASSERT(((SX_ADAPTIVESPIN | SX_RECURSE) & LO_CLASSFLAGS) ==
    (SX_ADAPTIVESPIN | SX_RECURSE));

/* Handy macros for sleep queues. */
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define	GIANT_DECLARE							\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant)					\

#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_giantcnt++;					\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define	GIANT_RESTORE() do {						\
	if (_giantcnt > 0) {						\
		mtx_assert(&Giant, MA_NOTOWNED);			\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
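
/*
 * As a sketch, the hard-case functions below use these macros in the
 * following shape (illustrative only, not a real function body):
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();			before spinning or sleeping
 *	sleepq_wait(...);
 *	...
 *	GIANT_RESTORE();		once the lock has been acquired
 */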

/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define	sx_recurse		lock_object.lo_data
#define	sx_recursed(sx)		((sx)->sx_recurse != 0)

static void	assert_sx(struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_sx(struct lock_object *lock);
#endif
static void	lock_sx(struct lock_object *lock, int how);
static int	unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
	.lc_name = "sx",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_sx,
#ifdef DDB
	.lc_ddb_show = db_show_sx,
#endif
	.lc_lock = lock_sx,
	.lc_unlock = unlock_sx,
};

#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif

void
assert_sx(struct lock_object *lock, int what)
{

	sx_assert((struct sx *)lock, what);
}

void
lock_sx(struct lock_object *lock, int how)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	if (how)
		sx_xlock(sx);
	else
		sx_slock(sx);
}

int
unlock_sx(struct lock_object *lock)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
	if (sx_xlocked(sx)) {
		sx_xunlock(sx);
		return (1);
	} else {
		sx_sunlock(sx);
		return (0);
	}
}

void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init(sargs->sa_sx, sargs->sa_desc);
}
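
/*
 * sx_sysinit() lets a lock be initialized automatically at boot via
 * SYSINIT; the SX_SYSINIT() macro in <sys/sx.h> packages this up.
 * A sketch, with a hypothetical lock:
 *
 *	static struct sx foo_lock;
 *	SX_SYSINIT(foo_lock_init, &foo_lock, "foo lock");
 */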

void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
	int flags;

	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
	    SX_NOPROFILE | SX_ADAPTIVESPIN)) == 0);

	flags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
	if (opts & SX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & SX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & SX_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & SX_QUIET)
		flags |= LO_QUIET;

	flags |= opts & (SX_ADAPTIVESPIN | SX_RECURSE);
	sx->sx_lock = SX_LOCK_UNLOCKED;
	sx->sx_recurse = 0;
	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
}

void
sx_destroy(struct sx *sx)
{

	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
	sx->sx_lock = SX_LOCK_DESTROYED;
	lock_destroy(&sx->lock_object);
}

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{
	int error = 0;

	MPASS(curthread != NULL);
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
	error = __sx_slock(sx, opts, file, line);
	if (!error) {
		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
		WITNESS_LOCK(&sx->lock_object, 0, file, line);
		curthread->td_locks++;
	}

	return (error);
}

int
_sx_try_slock(struct sx *sx, const char *file, int line)
{
	uintptr_t x;

	for (;;) {
		x = sx->sx_lock;
		KASSERT(x != SX_LOCK_DESTROYED,
		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
		if (!(x & SX_LOCK_SHARED))
			break;
		if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
			curthread->td_locks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
	return (0);
}
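
/*
 * The try variants never sleep, so they are usable in contexts that
 * must not block.  A sketch of the usual caller pattern (hypothetical
 * lock and fallback):
 *
 *	if (sx_try_slock(&foo_lock)) {
 *		... read foo ...
 *		sx_sunlock(&foo_lock);
 *	} else {
 *		... fall back without the lock ...
 *	}
 */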

int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
	int error = 0;

	MPASS(curthread != NULL);
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	error = __sx_xlock(sx, curthread, opts, file, line);
	if (!error) {
		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
		    file, line);
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
		curthread->td_locks++;
	}

	return (error);
}

int
_sx_try_xlock(struct sx *sx, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

	if (sx_xlocked(sx) && (sx->lock_object.lo_flags & SX_RECURSE) != 0) {
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
		    (uintptr_t)curthread);
	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
	}

	return (rval);
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
	__sx_sunlock(sx, file, line);
	lock_profile_release_lock(&sx->lock_object);
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
	    line);
	if (!sx_recursed(sx))
		lock_profile_release_lock(&sx->lock_object);
	__sx_xunlock(sx, curthread, file, line);
}

/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeded, 0 otherwise.
 */
int
_sx_try_upgrade(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int success;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);

	/*
	 * Try to switch from one shared lock to an exclusive lock.  We need
	 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
	 * we will wake up the exclusive waiters when we drop the lock.
	 */
	x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
	success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
	    (uintptr_t)curthread | x);
	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
	if (success)
		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	return (success);
}
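
/*
 * Because the upgrade can fail, callers typically fall back to dropping
 * the shared lock and taking the exclusive lock outright.  A sketch,
 * with a hypothetical lock:
 *
 *	sx_slock(&foo_lock);
 *	...
 *	if (!sx_try_upgrade(&foo_lock)) {
 *		sx_sunlock(&foo_lock);
 *		sx_xlock(&foo_lock);
 *		... re-check state; it may have changed while unlocked ...
 *	}
 */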

/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
_sx_downgrade(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int wakeup_swapper;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (sx_recursed(sx))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

	/*
	 * Try to switch from an exclusive lock with no shared waiters
	 * to one sharer with no shared waiters.  If there are
	 * exclusive waiters, we don't need to lock the sleep queue so
	 * long as we preserve the flag.  We do one quick try and if
	 * that fails we grab the sleepq lock to keep the flags from
	 * changing and do it the slow way.
	 *
	 * We have to lock the sleep queue if there are shared waiters
	 * so we can wake them up.
	 */
	x = sx->sx_lock;
	if (!(x & SX_LOCK_SHARED_WAITERS) &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
		LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
		return;
	}

	/*
	 * Lock the sleep queue so we can read the waiters bits
	 * without any races and wakeup any shared waiters.
	 */
	sleepq_lock(&sx->lock_object);

	/*
	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
	 * shared lock.  If there are any shared waiters, wake them up.
	 */
	wakeup_swapper = 0;
	x = sx->sx_lock;
	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
	if (x & SX_LOCK_SHARED_WAITERS)
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_SHARED_QUEUE);
	sleepq_release(&sx->lock_object);

	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);

	if (wakeup_swapper)
		kick_proc0();
}
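
/*
 * A summary of the sx_lock word states manipulated by the hard-case
 * functions below (see <sys/sx.h> for the authoritative encoding):
 *
 *	SX_LOCK_UNLOCKED		no owner
 *	SX_SHARERS_LOCK(n) | flags	shared, held by n readers
 *	(uintptr_t)owner | flags	exclusive, owned by 'owner'
 *
 * where 'flags' may include SX_LOCK_SHARED_WAITERS and/or
 * SX_LOCK_EXCLUSIVE_WAITERS, and an exclusive owner may additionally
 * carry SX_LOCK_RECURSED.
 */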

/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
    int line)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
#endif
	uintptr_t x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;

	/* If we already hold an exclusive lock, then recurse. */
	if (sx_xlocked(sx)) {
		KASSERT((sx->lock_object.lo_flags & SX_RECURSE) != 0,
		    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
		    sx->lock_object.lo_name, file, line));
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
		return (0);
	}

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

	while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);
#ifdef ADAPTIVE_SX
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		x = sx->sx_lock;
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
			x = SX_OWNER(x);
			owner = (struct thread *)x;
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				GIANT_SAVE();
				while (SX_OWNER(sx->sx_lock) == x &&
				    TD_IS_RUNNING(owner))
					cpu_spinwait();
				continue;
			}
		}
#endif

		sleepq_lock(&sx->lock_object);
		x = sx->sx_lock;

		/*
		 * If the lock was released while spinning on the
		 * sleep queue chain lock, try again.
		 */
		if (x == SX_LOCK_UNLOCKED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the sleep queue
		 * chain lock.  If so, drop the sleep queue lock and try
		 * again.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
		}
#endif

		/*
		 * If an exclusive lock was released with both shared
		 * and exclusive waiters and a shared waiter hasn't
		 * woken up and acquired the lock yet, sx_lock will be
		 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
		 * If we see that value, try to acquire it once.  Note
		 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
		 * as there are other exclusive waiters still.  If we
		 * fail, restart the loop.
		 */
		if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (atomic_cmpset_acq_ptr(&sx->sx_lock,
			    SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
			    tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
				sleepq_release(&sx->lock_object);
				CTR2(KTR_LOCK, "%s: %p claimed by new writer",
				    __func__, sx);
				break;
			}
			sleepq_release(&sx->lock_object);
			continue;
		}

		/*
		 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we
		 * fail, then loop back and retry.
		 */
		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
			    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we have
		 * to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

		GIANT_SAVE();
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);

		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
	}

	GIANT_RESTORE();
	if (!error)
		lock_profile_obtain_lock_success(&sx->lock_object, contested,
		    waittime, file, line);
	return (error);
}
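
/*
 * The SX_INTERRUPTIBLE opt handled above is what backs the _sig lock
 * variants in <sys/sx.h>.  A sketch of a caller handling the
 * interruptible case (hypothetical lock):
 *
 *	error = sx_xlock_sig(&foo_lock);
 *	if (error)
 *		return (error);		sleep interrupted by a signal
 *	... foo_lock is held exclusively ...
 *	sx_xunlock(&foo_lock);
 */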

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
{
	uintptr_t x;
	int queue, wakeup_swapper;

	MPASS(!(sx->sx_lock & SX_LOCK_SHARED));

	/* If the lock is recursed, then unrecurse one level. */
	if (sx_xlocked(sx) && sx_recursed(sx)) {
		if ((--sx->sx_recurse) == 0)
			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
		return;
	}
	MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
	    SX_LOCK_EXCLUSIVE_WAITERS));
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

	sleepq_lock(&sx->lock_object);
	x = SX_LOCK_UNLOCKED;

	/*
	 * The wake up algorithm here is quite simple and probably not
	 * ideal.  It gives precedence to shared waiters if they are
	 * present.  For this condition, we have to preserve the
	 * state of the exclusive waiters flag.
	 */
	if (sx->sx_lock & SX_LOCK_SHARED_WAITERS) {
		queue = SQ_SHARED_QUEUE;
		x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
	} else
		queue = SQ_EXCLUSIVE_QUEUE;

	/* Wake up all the waiters for the specific queue. */
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
	atomic_store_rel_ptr(&sx->sx_lock, x);
	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
	    queue);
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * This function represents the so-called 'hard case' for sx_slock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t x;
	int error = 0;

	/*
	 * As with rwlocks, we don't make any attempt to try to block
	 * shared locks once there is an exclusive waiter.
	 */
	for (;;) {
		x = sx->sx_lock;

		/*
		 * If no other thread has an exclusive lock then try to bump up
		 * the count of sharers.  Since we have to preserve the state
		 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
		 * shared lock loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			MPASS(!(x & SX_LOCK_SHARED_WAITERS));
			if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
			    x + SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    sx, (void *)x,
					    (void *)(x + SX_ONE_SHARER));
				break;
			}
			continue;
		}
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if (sx->lock_object.lo_flags & SX_ADAPTIVESPIN) {
			x = SX_OWNER(x);
			owner = (struct thread *)x;
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				GIANT_SAVE();
				while (SX_OWNER(sx->sx_lock) == x &&
				    TD_IS_RUNNING(owner))
					cpu_spinwait();
				continue;
			}
		}
#endif

		/*
		 * Some other thread already has an exclusive lock, so
		 * start the process of blocking.
		 */
		sleepq_lock(&sx->lock_object);
		x = sx->sx_lock;

		/*
		 * The lock could have been released while we spun.
		 * In this case loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
		}
#endif

		/*
		 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
		 * fail to set it drop the sleep queue lock and loop
		 * back.
		 */
		if (!(x & SX_LOCK_SHARED_WAITERS)) {
			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
			    x | SX_LOCK_SHARED_WAITERS)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the shared lock,
		 * we have to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

		GIANT_SAVE();
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);

		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
	}
	if (error == 0)
		lock_profile_obtain_lock_success(&sx->lock_object, contested,
		    waittime, file, line);

	GIANT_RESTORE();
	return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_sunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_sunlock_hard(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int wakeup_swapper;

	for (;;) {
		x = sx->sx_lock;

		/*
		 * We should never have waiting sharers while at least
		 * one thread holds a shared lock.
		 */
		KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
		    ("%s: waiting sharers", __func__));

		/*
		 * See if there is more than one shared lock held.  If
		 * so, just drop one and return.
		 */
		if (SX_SHARERS(x) > 1) {
			if (atomic_cmpset_ptr(&sx->sx_lock, x,
			    x - SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, sx, (void *)x,
					    (void *)(x - SX_ONE_SHARER));
				break;
			}
			continue;
		}

		/*
		 * If there aren't any waiters for an exclusive lock,
		 * then try to drop it quickly.
		 */
		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
			MPASS(x == SX_SHARERS_LOCK(1));
			if (atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1),
			    SX_LOCK_UNLOCKED)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, sx);
				break;
			}
			continue;
		}

		/*
		 * At this point, there should just be one sharer with
		 * exclusive waiters.
		 */
		MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

		sleepq_lock(&sx->lock_object);

		/*
		 * Wake up semantic here is quite simple:
		 * Just wake up all the exclusive waiters.
		 * Note that the state of the lock could have changed,
		 * so if it fails loop back and retry.
		 */
		if (!atomic_cmpset_ptr(&sx->sx_lock,
		    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
		    SX_LOCK_UNLOCKED)) {
			sleepq_release(&sx->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p waking up all threads on "
			    "exclusive queue", __func__, sx);
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_EXCLUSIVE_QUEUE);
		sleepq_release(&sx->lock_object);
		if (wakeup_swapper)
			kick_proc0();
		break;
	}
}
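
/*
 * sx_assert() below lets consumers document and enforce their locking
 * protocol.  A sketch (hypothetical function and lock):
 *
 *	static void
 *	foo_modify(struct foo *fp)
 *	{
 *
 *		sx_assert(&foo_lock, SA_XLOCKED);
 *		... caller must already hold the exclusive lock ...
 *	}
 */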

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
	int slocked = 0;
#endif

	if (panicstr != NULL)
		return;
	switch (what) {
	case SA_SLOCKED:
	case SA_SLOCKED | SA_NOTRECURSED:
	case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
		slocked = 1;
		/* FALLTHROUGH */
#endif
	case SA_LOCKED:
	case SA_LOCKED | SA_NOTRECURSED:
	case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If some other thread has an exclusive lock or we
		 * have one and are asserting a shared lock, fail.
		 * Also, if no one has a lock at all, fail.
		 */
		if (sx->sx_lock == SX_LOCK_UNLOCKED ||
		    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
		    sx_xholder(sx) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    sx->lock_object.lo_name, slocked ? "share " : "",
			    file, line);

		if (!(sx->sx_lock & SX_LOCK_SHARED)) {
			if (sx_recursed(sx)) {
				if (what & SA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    sx->lock_object.lo_name, file,
					    line);
			} else if (what & SA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		}
#endif
		break;
	case SA_XLOCKED:
	case SA_XLOCKED | SA_NOTRECURSED:
	case SA_XLOCKED | SA_RECURSED:
		if (sx_xholder(sx) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		if (sx_recursed(sx)) {
			if (what & SA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		} else if (what & SA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		break;
	case SA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If we hold an exclusive lock fail.  We can't
		 * reliably check to see if we hold a shared lock or
		 * not.
		 */
		if (sx_xholder(sx) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif	/* INVARIANT_SUPPORT */

#ifdef DDB
static void
db_show_sx(struct lock_object *lock)
{
	struct thread *td;
	struct sx *sx;

	sx = (struct sx *)lock;

	db_printf(" state: ");
	if (sx->sx_lock == SX_LOCK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (sx->sx_lock == SX_LOCK_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
	else {
		td = sx_xholder(sx);
		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (sx_recursed(sx))
			db_printf(" recursed: %d\n", sx->sx_recurse);
	}

	db_printf(" waiters: ");
	switch(sx->sx_lock &
	    (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
	case SX_LOCK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive and shared\n");
		break;
	default:
		db_printf("none\n");
	}
}
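
/*
 * db_show_sx() is reached through the lock class's lc_ddb_show hook,
 * so from the in-kernel debugger its output appears under something
 * like the following (the address and lock are hypothetical):
 *
 *	db> show lock 0xc41f3d60
 *	 ...
 *	 state: SLOCK: 2
 *	 waiters: exclusive
 */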

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
	struct sx *sx;

	/*
	 * Check to see if this thread is blocked on an sx lock.
	 * First, we check the lock class.  If that is ok, then we
	 * compare the lock name against the wait message.
	 */
	sx = td->td_wchan;
	if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
	    sx->lock_object.lo_name != td->td_wmesg)
		return (0);

	/* We think we have an sx lock, so output some details. */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	*ownerp = sx_xholder(sx);
	if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK (count %ju)\n",
		    (uintmax_t)SX_SHARERS(sx->sx_lock));
	else
		db_printf("XLOCK\n");
	return (1);
}
#endif