/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_sx.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

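/*
 * A quick usage sketch for the sx(9) API implemented here; the lock and
 * the calling code are hypothetical, illustration only:
 *
 *	static struct sx example_lock;
 *
 *	sx_init(&example_lock, "example");
 *
 *	sx_slock(&example_lock);	shared side: many threads at once
 *	...read shared state...
 *	sx_sunlock(&example_lock);
 *
 *	sx_xlock(&example_lock);	exclusive side: a single thread
 *	...modify shared state...
 *	sx_xunlock(&example_lock);
 *
 *	sx_destroy(&example_lock);
 */
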
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#define	ADAPTIVE_SX
#endif

CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/* Handy macros for sleep queues. */
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define	GIANT_DECLARE							\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant)					\

#define	GIANT_SAVE(work) do {						\
	if (__predict_false(mtx_owned(&Giant))) {			\
		work++;							\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_giantcnt++;					\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define	GIANT_RESTORE() do {						\
	if (_giantcnt > 0) {						\
		mtx_assert(&Giant, MA_NOTOWNED);			\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)

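/*
 * A minimal sketch of how the Giant macros above pair up inside one of
 * the lock acquisition routines below (hypothetical function body):
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE(extra_work);		before spinning or sleeping
 *	...
 *	GIANT_RESTORE();		after the lock has been acquired
 */
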
/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define	sx_recursed(sx)		((sx)->sx_recurse != 0)

static void	assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_sx(const struct lock_object *lock);
#endif
static void	lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
	.lc_name = "sx",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_sx,
#ifdef DDB
	.lc_ddb_show = db_show_sx,
#endif
	.lc_lock = lock_sx,
	.lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_sx,
#endif
};

#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif

#ifdef ADAPTIVE_SX
static __read_frequently u_int asx_retries;
static __read_frequently u_int asx_loops;
static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");

static struct lock_delay_config __read_frequently sx_delay;

SYSCTL_INT(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base,
    0, "");
SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
    0, "");

static void
sx_lock_delay_init(void *arg __unused)
{

	lock_delay_default_init(&sx_delay);
	asx_retries = 10;
	asx_loops = max(10000, sx_delay.max);
}
LOCK_DELAY_SYSINIT(sx_lock_delay_init);
#endif

void
assert_sx(const struct lock_object *lock, int what)
{

	sx_assert((const struct sx *)lock, what);
}

void
lock_sx(struct lock_object *lock, uintptr_t how)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	if (how)
		sx_slock(sx);
	else
		sx_xlock(sx);
}

uintptr_t
unlock_sx(struct lock_object *lock)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
	if (sx_xlocked(sx)) {
		sx_xunlock(sx);
		return (0);
	} else {
		sx_sunlock(sx);
		return (1);
	}
}

#ifdef KDTRACE_HOOKS
int
owner_sx(const struct lock_object *lock, struct thread **owner)
{
	const struct sx *sx;
	uintptr_t x;

	sx = (const struct sx *)lock;
	x = sx->sx_lock;
	*owner = NULL;
	return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
	    ((*owner = (struct thread *)SX_OWNER(x)) != NULL));
}
#endif

void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
}

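/*
 * sx_sysinit() exists so that locks can be set up from SYSINIT during
 * boot.  A hedged sketch of the usual idiom (names are hypothetical):
 *
 *	static struct sx foo_lock;
 *	SX_SYSINIT(foo_lock_init, &foo_lock, "foo lock");
 */
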
void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
	int flags;

	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
	    SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
	    &sx->sx_lock));

	flags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (opts & SX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & SX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & SX_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & SX_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & SX_QUIET)
		flags |= LO_QUIET;
	if (opts & SX_NEW)
		flags |= LO_NEW;

	flags |= opts & SX_NOADAPTIVE;
	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
	sx->sx_lock = SX_LOCK_UNLOCKED;
	sx->sx_recurse = 0;
}

void
sx_destroy(struct sx *sx)
{

	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
	sx->sx_lock = SX_LOCK_DESTROYED;
	lock_destroy(&sx->lock_object);
}

int
sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));

	x = sx->sx_lock;
	for (;;) {
		KASSERT(x != SX_LOCK_DESTROYED,
		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
		if (!(x & SX_LOCK_SHARED))
			break;
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			curthread->td_sx_slocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
	return (0);
}

int
sx_try_slock_(struct sx *sx, const char *file, int line)
{

	return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG));
}

int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
	uintptr_t tid, x;
	int error = 0;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	tid = (uintptr_t)curthread;
	x = SX_LOCK_UNLOCKED;
	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
		error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    0, 0, file, line, LOCKSTAT_WRITER);
	if (!error) {
		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
		    file, line);
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_INC(curthread);
	}

	return (error);
}

int
sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, x;
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

	rval = 1;
	recursed = false;
	x = SX_LOCK_UNLOCKED;
	for (;;) {
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
			break;
		if (x == SX_LOCK_UNLOCKED)
			continue;
		if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
			sx->sx_recurse++;
			atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
			break;
		}
		rval = 0;
		break;
	}

	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}

	return (rval);
}

int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{

	return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG));
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
	    line);
#if LOCK_DEBUG > 0
	_sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
#else
	__sx_xunlock(sx, curthread, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}

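/*
 * Hypothetical callers of the try and _sig acquisition variants above:
 *
 *	if (sx_try_xlock(&example_lock)) {
 *		...modify state, no sleeping happened...
 *		sx_xunlock(&example_lock);
 *	}
 *
 *	error = sx_xlock_sig(&example_lock);	interruptible by signals
 *	if (error != 0)
 *		return (error);			lock not held
 */
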
/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeded, 0 otherwise.
 */
int
sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;
	uintptr_t waiters;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);

	/*
	 * Try to switch from one shared lock to an exclusive lock.  We need
	 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
	 * we will wake up the exclusive waiters when we drop the lock.
	 */
	success = 0;
	x = SX_READ_VALUE(sx);
	for (;;) {
		if (SX_SHARERS(x) > 1)
			break;
		waiters = (x & SX_LOCK_WAITERS);
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
		    (uintptr_t)curthread | waiters)) {
			success = 1;
			break;
		}
	}
	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_sx_slocks--;
		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(sx__upgrade, sx);
	}
	return (success);
}

int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{

	return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG));
}

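/*
 * A common (hypothetical) upgrade pattern: take the lock shared and only
 * escalate once a modification turns out to be needed:
 *
 *	sx_slock(&example_lock);
 *	if (need_change && !sx_try_upgrade(&example_lock)) {
 *		sx_sunlock(&example_lock);
 *		sx_xlock(&example_lock);
 *		...re-check: state may have changed while unlocked...
 *	}
 */
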
/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (sx_recursed(sx))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

	/*
	 * Try to switch from an exclusive lock with no shared waiters
	 * to one sharer with no shared waiters.  If there are
	 * exclusive waiters, we don't need to lock the sleep queue so
	 * long as we preserve the flag.  We do one quick try and if
	 * that fails we grab the sleepq lock to keep the flags from
	 * changing and do it the slow way.
	 *
	 * We have to lock the sleep queue if there are shared waiters
	 * so we can wake them up.
	 */
	x = sx->sx_lock;
	if (!(x & SX_LOCK_SHARED_WAITERS) &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS)))
		goto out;

	/*
	 * Lock the sleep queue so we can read the waiters bits
	 * without any races and wakeup any shared waiters.
	 */
	sleepq_lock(&sx->lock_object);

	/*
	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
	 * shared lock.  If there are any shared waiters, wake them up.
	 */
	wakeup_swapper = 0;
	x = sx->sx_lock;
	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
	if (x & SX_LOCK_SHARED_WAITERS)
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_SHARED_QUEUE);
	sleepq_release(&sx->lock_object);

	if (wakeup_swapper)
		kick_proc0();

out:
	curthread->td_sx_slocks++;
	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(sx__downgrade, sx);
}

void
sx_downgrade_(struct sx *sx, const char *file, int line)
{

	sx_downgrade_int(sx LOCK_FILE_LINE_ARG);
}

#ifdef ADAPTIVE_SX
static inline void
sx_drop_critical(uintptr_t x, bool *in_critical, int *extra_work)
{

	if (x & SX_LOCK_WRITE_SPINNER)
		return;
	if (*in_critical) {
		critical_exit();
		*in_critical = false;
		(*extra_work)--;
	}
}
#else
#define	sx_drop_critical(x, in_critical, extra_work) do { } while (0)
#endif

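/*
 * sx_downgrade() is the other direction: a writer that has finished
 * modifying state but still wants a consistent view can trade its
 * exclusive lock for a shared one without ever dropping the lock, e.g.
 * (hypothetical caller):
 *
 *	sx_xlock(&example_lock);
 *	...modify state...
 *	sx_downgrade(&example_lock);
 *	...keep reading the now-stable state...
 *	sx_sunlock(&example_lock);
 */
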
/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
{
	GIANT_DECLARE;
	uintptr_t tid, setx;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
	u_int i, n, spintries = 0;
	enum { READERS, WRITER } sleep_reason = READERS;
	bool adaptive;
	bool in_critical = false;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state = 0;
	int doing_lockprof = 0;
#endif
	int extra_work = 0;

	tid = (uintptr_t)curthread;

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
		while (x == SX_LOCK_UNLOCKED) {
			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
				goto out_lockstat;
		}
		extra_work = 1;
		doing_lockprof = 1;
		all_time -= lockstat_nsecs(&sx->lock_object);
		state = x;
	}
#endif
#ifdef LOCK_PROFILING
	extra_work = 1;
	doing_lockprof = 1;
	state = x;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

	if (__predict_false(x == SX_LOCK_UNLOCKED))
		x = SX_READ_VALUE(sx);

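	/*
	 * Note on the lock word read above: when SX_LOCK_SHARED is set,
	 * the upper bits of sx_lock count the sharers (SX_SHARERS());
	 * otherwise they hold the owning thread pointer (SX_OWNER()).
	 * The low bits carry the waiter/spinner flags tested below.
	 */
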
	/* If we already hold an exclusive lock, then recurse. */
	if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
		    sx->lock_object.lo_name, file, line));
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
		return (0);
	}

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

#ifdef ADAPTIVE_SX
	adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
	    &waittime);

#ifndef INVARIANTS
	GIANT_SAVE(extra_work);
#endif

	for (;;) {
		if (x == SX_LOCK_UNLOCKED) {
			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
				break;
			continue;
		}
#ifdef INVARIANTS
		GIANT_SAVE(extra_work);
#endif
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_SX
		if (__predict_false(!adaptive))
			goto sleepq;
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		if ((x & SX_LOCK_SHARED) == 0) {
			sx_drop_critical(x, &in_critical, &extra_work);
			sleep_reason = WRITER;
			owner = lv_sx_owner(x);
			if (!TD_IS_RUNNING(owner))
				goto sleepq;
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, sx, owner);
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    sx->lock_object.lo_name);
			do {
				lock_delay(&lda);
				x = SX_READ_VALUE(sx);
				owner = lv_sx_owner(x);
			} while (owner != NULL && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			continue;
		} else if (SX_SHARERS(x) > 0) {
			sleep_reason = READERS;
			if (spintries == asx_retries)
				goto sleepq;
			if (!(x & SX_LOCK_WRITE_SPINNER)) {
				if (!in_critical) {
					critical_enter();
					in_critical = true;
					extra_work++;
				}
				if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
				    x | SX_LOCK_WRITE_SPINNER)) {
					critical_exit();
					in_critical = false;
					extra_work--;
					continue;
				}
			}
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    sx->lock_object.lo_name);
			n = SX_SHARERS(x);
			for (i = 0; i < asx_loops; i += n) {
				lock_delay_spin(n);
				x = SX_READ_VALUE(sx);
				if (!(x & SX_LOCK_WRITE_SPINNER))
					break;
				if (!(x & SX_LOCK_SHARED))
					break;
				n = SX_SHARERS(x);
				if (n == 0)
					break;
			}
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += i;
#endif
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			if (i < asx_loops)
				continue;
		}
sleepq:
#endif
		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);
retry_sleepq:
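		/*
		 * From here on we hold the sleep queue chain lock, so the
		 * waiter bits cannot change under us until we either sleep
		 * or release it; every failed atomic update below jumps
		 * back to retry_sleepq to re-evaluate the freshly re-read
		 * lock word without dropping that lock.
		 */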

		/*
		 * If the lock was released while spinning on the
		 * sleep queue chain lock, try again.
		 */
		if (x == SX_LOCK_UNLOCKED) {
			sleepq_release(&sx->lock_object);
			sx_drop_critical(x, &in_critical, &extra_work);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the sleep queue
		 * chain lock.  If so, drop the sleep queue lock and try
		 * again.
		 */
		if (adaptive) {
			if (!(x & SX_LOCK_SHARED)) {
				owner = (struct thread *)SX_OWNER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&sx->lock_object);
					sx_drop_critical(x, &in_critical,
					    &extra_work);
					continue;
				}
			} else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
				sleepq_release(&sx->lock_object);
				sx_drop_critical(x, &in_critical, &extra_work);
				continue;
			}
		}
#endif

		/*
		 * If an exclusive lock was released with both shared
		 * and exclusive waiters and a shared waiter hasn't
		 * woken up and acquired the lock yet, sx_lock will be
		 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
		 * If we see that value, try to acquire it once.  Note
		 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
		 * as there are other exclusive waiters still.  If we
		 * fail, restart the loop.
		 */
		setx = x & (SX_LOCK_WAITERS | SX_LOCK_WRITE_SPINNER);
		if ((x & ~setx) == SX_LOCK_SHARED) {
			setx &= ~SX_LOCK_WRITE_SPINNER;
			if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid | setx))
				goto retry_sleepq;
			sleepq_release(&sx->lock_object);
			CTR2(KTR_LOCK, "%s: %p claimed by new writer",
			    __func__, sx);
			break;
		}

#ifdef ADAPTIVE_SX
		/*
		 * It is possible we set the SX_LOCK_WRITE_SPINNER bit.
		 * It is an invariant that when the bit is set, there is
		 * a writer ready to grab the lock.  Thus clear the bit since
		 * we are going to sleep.
		 */
		if (in_critical) {
			if ((x & SX_LOCK_WRITE_SPINNER) ||
			    !((x & SX_LOCK_EXCLUSIVE_WAITERS))) {
				setx = x & ~SX_LOCK_WRITE_SPINNER;
				setx |= SX_LOCK_EXCLUSIVE_WAITERS;
				if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
				    setx)) {
					goto retry_sleepq;
				}
			}
			critical_exit();
			in_critical = false;
		} else {
#endif
			/*
			 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS.  If we
			 * fail, then loop back and retry.
			 */
			if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
				if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
				    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
					goto retry_sleepq;
				}
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
					    __func__, sx);
			}
#ifdef ADAPTIVE_SX
		}
#endif

		/*
		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we have
		 * to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
	if (__predict_true(!extra_work))
		return (error);
#ifdef ADAPTIVE_SX
	if (in_critical)
		critical_exit();
#endif
	GIANT_RESTORE();
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return (error);
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
out_lockstat:
#endif
	if (!error)
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_WRITER);
	return (error);
}

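/*
 * The LOCKSTAT_RECORD*() calls above feed the DTrace lockstat provider,
 * so sx contention can be observed from userland, e.g. (probe naming
 * assumed from the sx__block SDT name above):
 *
 *	dtrace -n 'lockstat:::sx-block { @[stack()] = count(); }'
 */
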
/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t tid, setx;
	int queue, wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;

	if (__predict_false(x == tid))
		x = SX_READ_VALUE(sx);

	MPASS(!(x & SX_LOCK_SHARED));

	if (__predict_false(x & SX_LOCK_RECURSED)) {
		/* The lock is recursed, unrecurse one level. */
		if ((--sx->sx_recurse) == 0)
			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
	if (x == tid &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
		return;

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

	sleepq_lock(&sx->lock_object);
	x = SX_READ_VALUE(sx);
	MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS));

	/*
	 * The wake up algorithm here is quite simple and probably not
	 * ideal.  It gives precedence to shared waiters if they are
	 * present.  For this condition, we have to preserve the
	 * state of the exclusive waiters flag.
	 * If interruptible sleeps left the shared queue empty, avoid
	 * starvation of the threads sleeping on the exclusive queue by
	 * giving them precedence and cleaning up the shared waiters bit
	 * anyway.
	 */
	setx = SX_LOCK_UNLOCKED;
	queue = SQ_SHARED_QUEUE;
	if ((x & SX_LOCK_EXCLUSIVE_WAITERS) != 0 &&
	    sleepq_sleepcnt(&sx->lock_object, SQ_EXCLUSIVE_QUEUE) != 0) {
		queue = SQ_EXCLUSIVE_QUEUE;
		setx |= (x & SX_LOCK_SHARED_WAITERS);
	}
	atomic_store_rel_ptr(&sx->sx_lock, setx);

	/* Wake up all the waiters for the specific queue. */
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");

	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
	    queue);
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
}

static bool __always_inline
__sx_can_read(struct thread *td, uintptr_t x, bool fp)
{

	if ((x & (SX_LOCK_SHARED | SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_WRITE_SPINNER))
	    == SX_LOCK_SHARED)
		return (true);
	if (!fp && td->td_sx_slocks && (x & SX_LOCK_SHARED))
		return (true);
	return (false);
}

static bool __always_inline
__sx_slock_try(struct sx *sx, struct thread *td, uintptr_t *xp, bool fp
    LOCK_FILE_LINE_ARG_DEF)
{

	/*
	 * If no other thread has an exclusive lock then try to bump up
	 * the count of sharers.  Since we have to preserve the state
	 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
	 * shared lock loop back and retry.
	 */
	while (__sx_can_read(td, *xp, fp)) {
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
		    *xp + SX_ONE_SHARER)) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR4(KTR_LOCK, "%s: %p succeed %p -> %p",
				    __func__, sx, (void *)*xp,
				    (void *)(*xp + SX_ONE_SHARER));
			td->td_sx_slocks++;
			return (true);
		}
	}
	return (false);
}

static int __noinline
_sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	GIANT_DECLARE;
	struct thread *td;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
	u_int i, n, spintries = 0;
	bool adaptive;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state = 0;
#endif
	int extra_work = 0;

	td = curthread;

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
		if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
			goto out_lockstat;
		extra_work = 1;
		all_time -= lockstat_nsecs(&sx->lock_object);
		state = x;
	}
#endif
#ifdef LOCK_PROFILING
	extra_work = 1;
	state = x;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

#ifdef ADAPTIVE_SX
	adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
	    &waittime);

#ifndef INVARIANTS
	GIANT_SAVE(extra_work);
#endif

	/*
	 * As with rwlocks, we don't make any attempt to try to block
	 * shared locks once there is an exclusive waiter.
	 */
	for (;;) {
		if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
			break;
#ifdef INVARIANTS
		GIANT_SAVE(extra_work);
#endif
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_SX
		if (__predict_false(!adaptive))
			goto sleepq;

		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((x & SX_LOCK_SHARED) == 0) {
			owner = lv_sx_owner(x);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				do {
					lock_delay(&lda);
					x = SX_READ_VALUE(sx);
					owner = lv_sx_owner(x);
				} while (owner != NULL && TD_IS_RUNNING(owner));
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else {
			if ((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) {
				MPASS(!__sx_can_read(td, x, false));
				lock_delay_spin(2);
				x = SX_READ_VALUE(sx);
				continue;
			}
			if (spintries < asx_retries) {
				KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
				    "spinning", "lockname:\"%s\"",
				    sx->lock_object.lo_name);
				n = SX_SHARERS(x);
				for (i = 0; i < asx_loops; i += n) {
					lock_delay_spin(n);
					x = SX_READ_VALUE(sx);
					if (!(x & SX_LOCK_SHARED))
						break;
					n = SX_SHARERS(x);
					if (n == 0)
						break;
					if (__sx_can_read(td, x, false))
						break;
				}
#ifdef KDTRACE_HOOKS
				lda.spin_cnt += i;
#endif
				KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
				    "running");
				if (i < asx_loops)
					continue;
			}
		}
sleepq:
#endif

		/*
		 * Some other thread already has an exclusive lock, so
		 * start the process of blocking.
		 */

		/*
		 * Some other thread already has an exclusive lock, so
		 * start the process of blocking.
		 */
		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);
retry_sleepq:
		if (((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) ||
		    __sx_can_read(td, x, false)) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if (!(x & SX_LOCK_SHARED) && adaptive) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				x = SX_READ_VALUE(sx);
				continue;
			}
		}
#endif

		/*
		 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
		 * fail to set it, drop the sleep queue lock and loop
		 * back.
		 */
		if (!(x & SX_LOCK_SHARED_WAITERS)) {
			if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
			    x | SX_LOCK_SHARED_WAITERS))
				goto retry_sleepq;
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the shared lock,
		 * we have to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);
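
		/*
		 * Lost-wakeup protocol recap: the waiter flag was
		 * published above while holding the sleepqueue chain
		 * lock, and an unlocking thread must take that same
		 * chain lock before it can broadcast, so a wakeup
		 * cannot slip in between setting the flag and the
		 * sleepq_add()/sleepq_wait() below.
		 */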
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!extra_work))
		return (error);
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
out_lockstat:
#endif
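
	/*
	 * The sx__block and sx__spin records above surface through the
	 * DTrace lockstat provider; assuming the usual "__" to "-"
	 * probe-name translation, e.g.:
	 *
	 *	# dtrace -n 'lockstat:::sx-block { @blocked = count(); }'
	 */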
	if (error == 0) {
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_READER);
	}
	GIANT_RESTORE();
	return (error);
}

int
_sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t x;
	int error;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);

	error = 0;
	td = curthread;
	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) ||
	    !__sx_slock_try(sx, td, &x, true LOCK_FILE_LINE_ARG)))
		error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
	else
		lock_profile_obtain_lock_success(&sx->lock_object, 0, 0,
		    file, line);
	if (error == 0) {
		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
		WITNESS_LOCK(&sx->lock_object, 0, file, line);
		TD_LOCKS_INC(curthread);
	}
	return (error);
}

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{

	return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
}
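
/*
 * Typical consumer usage of the shared-lock entry points above, via the
 * sx(9) wrapper macros (data_lock is a hypothetical example):
 *
 *	struct sx data_lock;
 *	sx_init(&data_lock, "data lock");
 *
 *	sx_slock(&data_lock);		-- may sleep, never fails
 *	... read the protected data ...
 *	sx_sunlock(&data_lock);
 *
 *	error = sx_slock_sig(&data_lock);
 *	if (error != 0)
 *		return (error);		-- interrupted in the
 *					   sleepq_wait_sig() path above
 */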

static bool __always_inline
_sx_sunlock_try(struct sx *sx, struct thread *td, uintptr_t *xp)
{

	for (;;) {
		if (SX_SHARERS(*xp) > 1 || !(*xp & SX_LOCK_WAITERS)) {
			if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
			    *xp - SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, sx, (void *)*xp,
					    (void *)(*xp - SX_ONE_SHARER));
				td->td_sx_slocks--;
				return (true);
			}
			continue;
		}
		break;
	}
	return (false);
}
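
/*
 * Schematic lock word transitions for a shared release (the exact bit
 * encodings live in sys/sx.h):
 *
 *	more sharers remain, or nobody waits:
 *		x -> x - SX_ONE_SHARER		(fast path above)
 *	last sharer and waiters are queued:
 *		fall through to _sx_sunlock_hard() below, which must
 *		also choose a sleepqueue to wake.
 */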

static void __noinline
_sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x
    LOCK_FILE_LINE_ARG_DEF)
{
	int wakeup_swapper = 0;
	uintptr_t setx, queue;

	if (SCHEDULER_STOPPED())
		return;

	if (_sx_sunlock_try(sx, td, &x))
		goto out_lockstat;

	sleepq_lock(&sx->lock_object);
	x = SX_READ_VALUE(sx);
	for (;;) {
		if (_sx_sunlock_try(sx, td, &x))
			break;

		/*
		 * Wake up semantic here is quite simple:
		 * just wake up all the waiters on a single queue,
		 * preferring the exclusive queue if there are exclusive
		 * waiters.  Note that the state of the lock could have
		 * changed, so if the cmpset fails loop back and retry.
		 */
		setx = SX_LOCK_UNLOCKED;
		queue = SQ_SHARED_QUEUE;
		if (x & SX_LOCK_EXCLUSIVE_WAITERS) {
			setx |= (x & SX_LOCK_SHARED_WAITERS);
			queue = SQ_EXCLUSIVE_QUEUE;
		}
		setx |= (x & SX_LOCK_WRITE_SPINNER);
		if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
			continue;
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK,
			    "%s: %p waking up all threads on exclusive queue",
			    __func__, sx);
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, queue);
		td->td_sx_slocks--;
		break;
	}
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
out_lockstat:
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
}

void
_sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t x;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);

	td = curthread;
	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) ||
	    !_sx_sunlock_try(sx, td, &x)))
		_sx_sunlock_hard(sx, td, x LOCK_FILE_LINE_ARG);
	else
		lock_profile_release_lock(&sx->lock_object);

	TD_LOCKS_DEC(curthread);
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	_sx_sunlock_int(sx LOCK_FILE_LINE_ARG);
}
"share " : "", 14454e7f640dSJohn Baldwin file, line); 14464e7f640dSJohn Baldwin 14474e7f640dSJohn Baldwin if (!(sx->sx_lock & SX_LOCK_SHARED)) { 14484e7f640dSJohn Baldwin if (sx_recursed(sx)) { 14497ec137e5SJohn Baldwin if (what & SA_NOTRECURSED) 14504e7f640dSJohn Baldwin panic("Lock %s recursed @ %s:%d\n", 14514e7f640dSJohn Baldwin sx->lock_object.lo_name, file, 14524e7f640dSJohn Baldwin line); 14537ec137e5SJohn Baldwin } else if (what & SA_RECURSED) 14544e7f640dSJohn Baldwin panic("Lock %s not recursed @ %s:%d\n", 14554e7f640dSJohn Baldwin sx->lock_object.lo_name, file, line); 14564e7f640dSJohn Baldwin } 14574e5e677bSJohn Baldwin #endif 14584e5e677bSJohn Baldwin break; 14597ec137e5SJohn Baldwin case SA_XLOCKED: 14607ec137e5SJohn Baldwin case SA_XLOCKED | SA_NOTRECURSED: 14617ec137e5SJohn Baldwin case SA_XLOCKED | SA_RECURSED: 14624e7f640dSJohn Baldwin if (sx_xholder(sx) != curthread) 146303129ba9SJohn Baldwin panic("Lock %s not exclusively locked @ %s:%d\n", 1464aa89d8cdSJohn Baldwin sx->lock_object.lo_name, file, line); 14654e7f640dSJohn Baldwin if (sx_recursed(sx)) { 14667ec137e5SJohn Baldwin if (what & SA_NOTRECURSED) 14674e7f640dSJohn Baldwin panic("Lock %s recursed @ %s:%d\n", 14684e7f640dSJohn Baldwin sx->lock_object.lo_name, file, line); 14697ec137e5SJohn Baldwin } else if (what & SA_RECURSED) 14704e7f640dSJohn Baldwin panic("Lock %s not recursed @ %s:%d\n", 14714e7f640dSJohn Baldwin sx->lock_object.lo_name, file, line); 14724e5e677bSJohn Baldwin break; 14737ec137e5SJohn Baldwin case SA_UNLOCKED: 147419b0efd3SPawel Jakub Dawidek #ifdef WITNESS 1475aa89d8cdSJohn Baldwin witness_assert(&sx->lock_object, what, file, line); 147619b0efd3SPawel Jakub Dawidek #else 1477f6739b1dSPawel Jakub Dawidek /* 14784e7f640dSJohn Baldwin * If we hold an exclusve lock fail. We can't 14794e7f640dSJohn Baldwin * reliably check to see if we hold a shared lock or 14804e7f640dSJohn Baldwin * not. 

#ifdef DDB
static void
db_show_sx(const struct lock_object *lock)
{
	struct thread *td;
	const struct sx *sx;

	sx = (const struct sx *)lock;

	db_printf(" state: ");
	if (sx->sx_lock == SX_LOCK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (sx->sx_lock == SX_LOCK_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
	else {
		td = sx_xholder(sx);
		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (sx_recursed(sx))
			db_printf(" recursed: %d\n", sx->sx_recurse);
	}

	db_printf(" waiters: ");
	switch (sx->sx_lock &
	    (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
	case SX_LOCK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive and shared\n");
		break;
	default:
		db_printf("none\n");
	}
}
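
/*
 * db_show_sx() is reached through the lock class table, so any sx lock
 * can be inspected from DDB by address; assuming the generic "show
 * lock" command, a session looks roughly like:
 *
 *	db> show lock 0xffffffff81234560
 *	 class: sx
 *	 name: data lock
 *	 state: SLOCK: 2
 *	 waiters: exclusive
 */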

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
	struct sx *sx;

	/*
	 * Check to see if this thread is blocked on an sx lock.
	 * First, we check the lock class.  If that is ok, then we
	 * compare the lock name against the wait message.
	 */
	sx = td->td_wchan;
	if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
	    sx->lock_object.lo_name != td->td_wmesg)
		return (0);

	/* We think we have an sx lock, so output some details. */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	*ownerp = sx_xholder(sx);
	if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK (count %ju)\n",
		    (uintmax_t)SX_SHARERS(sx->sx_lock));
	else
		db_printf("XLOCK\n");
	return (1);
}
#endif