19454b2d8SWarner Losh /*- 28a36da99SPedro F. Giffuni * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 38a36da99SPedro F. Giffuni * 44e7f640dSJohn Baldwin * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org> 54e7f640dSJohn Baldwin * Copyright (c) 2001 Jason Evans <jasone@freebsd.org> 64e7f640dSJohn Baldwin * All rights reserved. 76281b30aSJason Evans * 86281b30aSJason Evans * Redistribution and use in source and binary forms, with or without 96281b30aSJason Evans * modification, are permitted provided that the following conditions 106281b30aSJason Evans * are met: 116281b30aSJason Evans * 1. Redistributions of source code must retain the above copyright 126281b30aSJason Evans * notice(s), this list of conditions and the following disclaimer as 136281b30aSJason Evans * the first lines of this file unmodified other than the possible 146281b30aSJason Evans * addition of one or more copyright notices. 156281b30aSJason Evans * 2. Redistributions in binary form must reproduce the above copyright 166281b30aSJason Evans * notice(s), this list of conditions and the following disclaimer in the 176281b30aSJason Evans * documentation and/or other materials provided with the distribution. 186281b30aSJason Evans * 196281b30aSJason Evans * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY 206281b30aSJason Evans * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 216281b30aSJason Evans * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 226281b30aSJason Evans * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY 236281b30aSJason Evans * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 246281b30aSJason Evans * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 256281b30aSJason Evans * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 266281b30aSJason Evans * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 276281b30aSJason Evans * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 286281b30aSJason Evans * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 296281b30aSJason Evans * DAMAGE. 306281b30aSJason Evans */ 316281b30aSJason Evans 326281b30aSJason Evans /* 334e7f640dSJohn Baldwin * Shared/exclusive locks. This implementation attempts to ensure 344e7f640dSJohn Baldwin * deterministic lock granting behavior, so that slocks and xlocks are 354e7f640dSJohn Baldwin * interleaved. 366281b30aSJason Evans * 376281b30aSJason Evans * Priority propagation will not generally raise the priority of lock holders, 386281b30aSJason Evans * so should not be relied upon in combination with sx locks. 396281b30aSJason Evans */ 406281b30aSJason Evans 414e7f640dSJohn Baldwin #include "opt_ddb.h" 42f5f9340bSFabien Thomas #include "opt_hwpmc_hooks.h" 43e31d0833SAttilio Rao #include "opt_no_adaptive_sx.h" 444e7f640dSJohn Baldwin 45677b542eSDavid E. O'Brien #include <sys/cdefs.h> 46677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 47677b542eSDavid E. O'Brien 486281b30aSJason Evans #include <sys/param.h> 497a7ce668SAndriy Gapon #include <sys/systm.h> 50cd2fe4e6SAttilio Rao #include <sys/kdb.h> 510453ade5SMateusz Guzik #include <sys/kernel.h> 526281b30aSJason Evans #include <sys/ktr.h> 5319284646SJohn Baldwin #include <sys/lock.h> 546281b30aSJason Evans #include <sys/mutex.h> 55d272fe53SJohn Baldwin #include <sys/proc.h> 562cba8dd3SJohn Baldwin #include <sys/sched.h> 574e7f640dSJohn Baldwin #include <sys/sleepqueue.h> 586281b30aSJason Evans #include <sys/sx.h> 591ada9041SMateusz Guzik #include <sys/smp.h> 60e31d0833SAttilio Rao #include <sys/sysctl.h> 614e7f640dSJohn Baldwin 62e31d0833SAttilio Rao #if defined(SMP) && !defined(NO_ADAPTIVE_SX) 634e7f640dSJohn Baldwin #include <machine/cpu.h> 644e7f640dSJohn Baldwin #endif 656281b30aSJason Evans 66462a7addSJohn Baldwin #ifdef DDB 67d272fe53SJohn Baldwin #include <ddb/ddb.h> 684e7f640dSJohn Baldwin #endif 69d272fe53SJohn Baldwin 701ae1c2a3SAttilio Rao #if defined(SMP) && !defined(NO_ADAPTIVE_SX) 711ae1c2a3SAttilio Rao #define ADAPTIVE_SX 724e7f640dSJohn Baldwin #endif 734e7f640dSJohn Baldwin 74f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS 75f5f9340bSFabien Thomas #include <sys/pmckern.h> 76f5f9340bSFabien Thomas PMC_SOFT_DECLARE( , , lock, failed); 77f5f9340bSFabien Thomas #endif 78f5f9340bSFabien Thomas 794e7f640dSJohn Baldwin /* Handy macros for sleep queues. */ 804e7f640dSJohn Baldwin #define SQ_EXCLUSIVE_QUEUE 0 814e7f640dSJohn Baldwin #define SQ_SHARED_QUEUE 1 824e7f640dSJohn Baldwin 834e7f640dSJohn Baldwin /* 844e7f640dSJohn Baldwin * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file. We 854e7f640dSJohn Baldwin * drop Giant anytime we have to sleep or if we adaptively spin. 864e7f640dSJohn Baldwin */ 874e7f640dSJohn Baldwin #define GIANT_DECLARE \ 884e7f640dSJohn Baldwin int _giantcnt = 0; \ 894e7f640dSJohn Baldwin WITNESS_SAVE_DECL(Giant) \ 904e7f640dSJohn Baldwin 91e41d6166SMateusz Guzik #define GIANT_SAVE(work) do { \ 92fb106123SMateusz Guzik if (__predict_false(mtx_owned(&Giant))) { \ 93e41d6166SMateusz Guzik work++; \ 944e7f640dSJohn Baldwin WITNESS_SAVE(&Giant.lock_object, Giant); \ 954e7f640dSJohn Baldwin while (mtx_owned(&Giant)) { \ 964e7f640dSJohn Baldwin _giantcnt++; \ 974e7f640dSJohn Baldwin mtx_unlock(&Giant); \ 984e7f640dSJohn Baldwin } \ 994e7f640dSJohn Baldwin } \ 1004e7f640dSJohn Baldwin } while (0) 1014e7f640dSJohn Baldwin 1024e7f640dSJohn Baldwin #define GIANT_RESTORE() do { \ 1034e7f640dSJohn Baldwin if (_giantcnt > 0) { \ 1044e7f640dSJohn Baldwin mtx_assert(&Giant, MA_NOTOWNED); \ 1054e7f640dSJohn Baldwin while (_giantcnt--) \ 1064e7f640dSJohn Baldwin mtx_lock(&Giant); \ 1074e7f640dSJohn Baldwin WITNESS_RESTORE(&Giant.lock_object, Giant); \ 1084e7f640dSJohn Baldwin } \ 1094e7f640dSJohn Baldwin } while (0) 1104e7f640dSJohn Baldwin 1114e7f640dSJohn Baldwin /* 112da7d0d1eSJohn Baldwin * Returns true if an exclusive lock is recursed. It assumes 113da7d0d1eSJohn Baldwin * curthread currently has an exclusive lock. 1144e7f640dSJohn Baldwin */ 1154e7f640dSJohn Baldwin #define sx_recursed(sx) ((sx)->sx_recurse != 0) 1164e7f640dSJohn Baldwin 117d576deedSPawel Jakub Dawidek static void assert_sx(const struct lock_object *lock, int what); 1184e7f640dSJohn Baldwin #ifdef DDB 119d576deedSPawel Jakub Dawidek static void db_show_sx(const struct lock_object *lock); 120d272fe53SJohn Baldwin #endif 1217faf4d90SDavide Italiano static void lock_sx(struct lock_object *lock, uintptr_t how); 122a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 123d576deedSPawel Jakub Dawidek static int owner_sx(const struct lock_object *lock, struct thread **owner); 124a5aedd68SStacey Son #endif 1257faf4d90SDavide Italiano static uintptr_t unlock_sx(struct lock_object *lock); 126d272fe53SJohn Baldwin 12719284646SJohn Baldwin struct lock_class lock_class_sx = { 128ae8dde30SJohn Baldwin .lc_name = "sx", 129ae8dde30SJohn Baldwin .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE, 130f9721b43SAttilio Rao .lc_assert = assert_sx, 131d272fe53SJohn Baldwin #ifdef DDB 132ae8dde30SJohn Baldwin .lc_ddb_show = db_show_sx, 133d272fe53SJohn Baldwin #endif 1346e21afd4SJohn Baldwin .lc_lock = lock_sx, 1356e21afd4SJohn Baldwin .lc_unlock = unlock_sx, 136a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 137a5aedd68SStacey Son .lc_owner = owner_sx, 138a5aedd68SStacey Son #endif 13919284646SJohn Baldwin }; 14019284646SJohn Baldwin 141781a35dfSJohn Baldwin #ifndef INVARIANTS 142781a35dfSJohn Baldwin #define _sx_assert(sx, what, file, line) 143781a35dfSJohn Baldwin #endif 144781a35dfSJohn Baldwin 1451ae1c2a3SAttilio Rao #ifdef ADAPTIVE_SX 1462e77cad1SMateusz Guzik #ifdef SX_CUSTOM_BACKOFF 1476b8dd26eSMateusz Guzik static u_short __read_frequently asx_retries; 1486b8dd26eSMateusz Guzik static u_short __read_frequently asx_loops; 1497029da5cSPawel Biernacki static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 1507029da5cSPawel Biernacki "sxlock debugging"); 1516b8dd26eSMateusz Guzik SYSCTL_U16(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, ""); 1526b8dd26eSMateusz Guzik SYSCTL_U16(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, ""); 1531ada9041SMateusz Guzik 154574adb65SMateusz Guzik static struct lock_delay_config __read_frequently sx_delay; 1551ada9041SMateusz Guzik 1566b8dd26eSMateusz Guzik SYSCTL_U16(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base, 1571ada9041SMateusz Guzik 0, ""); 1586b8dd26eSMateusz Guzik SYSCTL_U16(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max, 1591ada9041SMateusz Guzik 0, ""); 1601ada9041SMateusz Guzik 161e0e259a8SMateusz Guzik static void 162e0e259a8SMateusz Guzik sx_lock_delay_init(void *arg __unused) 163e0e259a8SMateusz Guzik { 164e0e259a8SMateusz Guzik 165e0e259a8SMateusz Guzik lock_delay_default_init(&sx_delay); 166e0e259a8SMateusz Guzik asx_retries = 10; 167e0e259a8SMateusz Guzik asx_loops = max(10000, sx_delay.max); 168e0e259a8SMateusz Guzik } 169e0e259a8SMateusz Guzik LOCK_DELAY_SYSINIT(sx_lock_delay_init); 1702e77cad1SMateusz Guzik #else 1712e77cad1SMateusz Guzik #define sx_delay locks_delay 1722e77cad1SMateusz Guzik #define asx_retries locks_delay_retries 1732e77cad1SMateusz Guzik #define asx_loops locks_delay_loops 1742e77cad1SMateusz Guzik #endif 1751ae1c2a3SAttilio Rao #endif 1761ae1c2a3SAttilio Rao 1776281b30aSJason Evans void 178d576deedSPawel Jakub Dawidek assert_sx(const struct lock_object *lock, int what) 179f9721b43SAttilio Rao { 180f9721b43SAttilio Rao 181d576deedSPawel Jakub Dawidek sx_assert((const struct sx *)lock, what); 182f9721b43SAttilio Rao } 183f9721b43SAttilio Rao 184f9721b43SAttilio Rao void 1857faf4d90SDavide Italiano lock_sx(struct lock_object *lock, uintptr_t how) 1866e21afd4SJohn Baldwin { 1876e21afd4SJohn Baldwin struct sx *sx; 1886e21afd4SJohn Baldwin 1896e21afd4SJohn Baldwin sx = (struct sx *)lock; 1906e21afd4SJohn Baldwin if (how) 1916e21afd4SJohn Baldwin sx_slock(sx); 192cf6b879fSDavide Italiano else 193cf6b879fSDavide Italiano sx_xlock(sx); 1946e21afd4SJohn Baldwin } 1956e21afd4SJohn Baldwin 1967faf4d90SDavide Italiano uintptr_t 1976e21afd4SJohn Baldwin unlock_sx(struct lock_object *lock) 1986e21afd4SJohn Baldwin { 1996e21afd4SJohn Baldwin struct sx *sx; 2006e21afd4SJohn Baldwin 2016e21afd4SJohn Baldwin sx = (struct sx *)lock; 2027ec137e5SJohn Baldwin sx_assert(sx, SA_LOCKED | SA_NOTRECURSED); 2036e21afd4SJohn Baldwin if (sx_xlocked(sx)) { 2046e21afd4SJohn Baldwin sx_xunlock(sx); 205cf6b879fSDavide Italiano return (0); 2066e21afd4SJohn Baldwin } else { 2076e21afd4SJohn Baldwin sx_sunlock(sx); 208cf6b879fSDavide Italiano return (1); 2096e21afd4SJohn Baldwin } 2106e21afd4SJohn Baldwin } 2116e21afd4SJohn Baldwin 212a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 213a5aedd68SStacey Son int 214d576deedSPawel Jakub Dawidek owner_sx(const struct lock_object *lock, struct thread **owner) 215a5aedd68SStacey Son { 216c365a293SMark Johnston const struct sx *sx; 217c365a293SMark Johnston uintptr_t x; 218a5aedd68SStacey Son 219c365a293SMark Johnston sx = (const struct sx *)lock; 220c365a293SMark Johnston x = sx->sx_lock; 221c365a293SMark Johnston *owner = NULL; 222a5aedd68SStacey Son return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) : 223c365a293SMark Johnston ((*owner = (struct thread *)SX_OWNER(x)) != NULL)); 224a5aedd68SStacey Son } 225a5aedd68SStacey Son #endif 226a5aedd68SStacey Son 2276e21afd4SJohn Baldwin void 228c27b5699SAndrew R. Reiter sx_sysinit(void *arg) 229c27b5699SAndrew R. Reiter { 230c27b5699SAndrew R. Reiter struct sx_args *sargs = arg; 231c27b5699SAndrew R. Reiter 232e4cd31ddSJeff Roberson sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags); 233c27b5699SAndrew R. Reiter } 234c27b5699SAndrew R. Reiter 235c27b5699SAndrew R. Reiter void 2364e7f640dSJohn Baldwin sx_init_flags(struct sx *sx, const char *description, int opts) 2376281b30aSJason Evans { 2384e7f640dSJohn Baldwin int flags; 2396281b30aSJason Evans 240b0d67325SJohn Baldwin MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK | 241f26db694SMateusz Guzik SX_NOPROFILE | SX_NEW)) == 0); 242353998acSAttilio Rao ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock, 243353998acSAttilio Rao ("%s: sx_lock not aligned for %s: %p", __func__, description, 244353998acSAttilio Rao &sx->sx_lock)); 245b0d67325SJohn Baldwin 246f0830182SAttilio Rao flags = LO_SLEEPABLE | LO_UPGRADABLE; 2474e7f640dSJohn Baldwin if (opts & SX_DUPOK) 2484e7f640dSJohn Baldwin flags |= LO_DUPOK; 2494e7f640dSJohn Baldwin if (opts & SX_NOPROFILE) 2504e7f640dSJohn Baldwin flags |= LO_NOPROFILE; 2514e7f640dSJohn Baldwin if (!(opts & SX_NOWITNESS)) 2524e7f640dSJohn Baldwin flags |= LO_WITNESS; 253f0830182SAttilio Rao if (opts & SX_RECURSE) 254f0830182SAttilio Rao flags |= LO_RECURSABLE; 2554e7f640dSJohn Baldwin if (opts & SX_QUIET) 2564e7f640dSJohn Baldwin flags |= LO_QUIET; 257fd07ddcfSDmitry Chagin if (opts & SX_NEW) 258fd07ddcfSDmitry Chagin flags |= LO_NEW; 2594e7f640dSJohn Baldwin 260b5fb43e5SJohn Baldwin lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags); 2614e7f640dSJohn Baldwin sx->sx_lock = SX_LOCK_UNLOCKED; 2624e7f640dSJohn Baldwin sx->sx_recurse = 0; 2636281b30aSJason Evans } 2646281b30aSJason Evans 2656281b30aSJason Evans void 2666281b30aSJason Evans sx_destroy(struct sx *sx) 2676281b30aSJason Evans { 2686281b30aSJason Evans 2694e7f640dSJohn Baldwin KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held")); 2704e7f640dSJohn Baldwin KASSERT(sx->sx_recurse == 0, ("sx lock still recursed")); 2710026c92cSJohn Baldwin sx->sx_lock = SX_LOCK_DESTROYED; 272aa89d8cdSJohn Baldwin lock_destroy(&sx->lock_object); 2736281b30aSJason Evans } 2746281b30aSJason Evans 275f9819486SAttilio Rao int 276013c0b49SMateusz Guzik sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF) 2775f36700aSJohn Baldwin { 2784e7f640dSJohn Baldwin uintptr_t x; 2795f36700aSJohn Baldwin 28035370593SAndriy Gapon if (SCHEDULER_STOPPED()) 28135370593SAndriy Gapon return (1); 28235370593SAndriy Gapon 283cd2fe4e6SAttilio Rao KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), 284e3ae0dfeSAttilio Rao ("sx_try_slock() by idle thread %p on sx %s @ %s:%d", 285e3ae0dfeSAttilio Rao curthread, sx->lock_object.lo_name, file, line)); 286e3ae0dfeSAttilio Rao 2874e7f640dSJohn Baldwin x = sx->sx_lock; 2885c5df0d9SMateusz Guzik for (;;) { 2890026c92cSJohn Baldwin KASSERT(x != SX_LOCK_DESTROYED, 2900026c92cSJohn Baldwin ("sx_try_slock() of destroyed sx @ %s:%d", file, line)); 291764a938bSPawel Jakub Dawidek if (!(x & SX_LOCK_SHARED)) 292764a938bSPawel Jakub Dawidek break; 2935c5df0d9SMateusz Guzik if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) { 294aa89d8cdSJohn Baldwin LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line); 295aa89d8cdSJohn Baldwin WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line); 296de2c95ccSMark Johnston LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, 297de2c95ccSMark Johnston sx, 0, 0, file, line, LOCKSTAT_READER); 298ce1c953eSMark Johnston TD_LOCKS_INC(curthread); 2992466d12bSMateusz Guzik curthread->td_sx_slocks++; 3005f36700aSJohn Baldwin return (1); 3015f36700aSJohn Baldwin } 302764a938bSPawel Jakub Dawidek } 3034e7f640dSJohn Baldwin 3044e7f640dSJohn Baldwin LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line); 3054e7f640dSJohn Baldwin return (0); 3065f36700aSJohn Baldwin } 3075f36700aSJohn Baldwin 308f9819486SAttilio Rao int 309013c0b49SMateusz Guzik sx_try_slock_(struct sx *sx, const char *file, int line) 310013c0b49SMateusz Guzik { 311013c0b49SMateusz Guzik 312013c0b49SMateusz Guzik return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG)); 313013c0b49SMateusz Guzik } 314013c0b49SMateusz Guzik 315013c0b49SMateusz Guzik int 316f9819486SAttilio Rao _sx_xlock(struct sx *sx, int opts, const char *file, int line) 3176281b30aSJason Evans { 3186ebb77b6SMateusz Guzik uintptr_t tid, x; 319f9819486SAttilio Rao int error = 0; 3206281b30aSJason Evans 321704cb42fSMark Johnston KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() || 322704cb42fSMark Johnston !TD_IS_IDLETHREAD(curthread), 323e3ae0dfeSAttilio Rao ("sx_xlock() by idle thread %p on sx %s @ %s:%d", 324e3ae0dfeSAttilio Rao curthread, sx->lock_object.lo_name, file, line)); 3250026c92cSJohn Baldwin KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, 3260026c92cSJohn Baldwin ("sx_xlock() of destroyed sx @ %s:%d", file, line)); 327aa89d8cdSJohn Baldwin WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file, 32841313430SJohn Baldwin line, NULL); 3296ebb77b6SMateusz Guzik tid = (uintptr_t)curthread; 3306ebb77b6SMateusz Guzik x = SX_LOCK_UNLOCKED; 3316ebb77b6SMateusz Guzik if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) 332013c0b49SMateusz Guzik error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG); 3336ebb77b6SMateusz Guzik else 3346ebb77b6SMateusz Guzik LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, 3356ebb77b6SMateusz Guzik 0, 0, file, line, LOCKSTAT_WRITER); 336f9819486SAttilio Rao if (!error) { 337f9819486SAttilio Rao LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse, 338f9819486SAttilio Rao file, line); 339aa89d8cdSJohn Baldwin WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line); 340ce1c953eSMark Johnston TD_LOCKS_INC(curthread); 3416281b30aSJason Evans } 3426281b30aSJason Evans 343f9819486SAttilio Rao return (error); 344f9819486SAttilio Rao } 345f9819486SAttilio Rao 3465f36700aSJohn Baldwin int 347013c0b49SMateusz Guzik sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF) 3485f36700aSJohn Baldwin { 3495c5df0d9SMateusz Guzik struct thread *td; 3505c5df0d9SMateusz Guzik uintptr_t tid, x; 3514e7f640dSJohn Baldwin int rval; 3525c5df0d9SMateusz Guzik bool recursed; 3535f36700aSJohn Baldwin 3545c5df0d9SMateusz Guzik td = curthread; 3555c5df0d9SMateusz Guzik tid = (uintptr_t)td; 3565c5df0d9SMateusz Guzik if (SCHEDULER_STOPPED_TD(td)) 35735370593SAndriy Gapon return (1); 35835370593SAndriy Gapon 359704cb42fSMark Johnston KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td), 360e3ae0dfeSAttilio Rao ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d", 361e3ae0dfeSAttilio Rao curthread, sx->lock_object.lo_name, file, line)); 3620026c92cSJohn Baldwin KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, 3630026c92cSJohn Baldwin ("sx_try_xlock() of destroyed sx @ %s:%d", file, line)); 3644e7f640dSJohn Baldwin 3655c5df0d9SMateusz Guzik rval = 1; 3665c5df0d9SMateusz Guzik recursed = false; 3675c5df0d9SMateusz Guzik x = SX_LOCK_UNLOCKED; 368b247fd39SMateusz Guzik for (;;) { 369b247fd39SMateusz Guzik if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) 370b247fd39SMateusz Guzik break; 371b247fd39SMateusz Guzik if (x == SX_LOCK_UNLOCKED) 372b247fd39SMateusz Guzik continue; 3735c5df0d9SMateusz Guzik if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) { 3744e7f640dSJohn Baldwin sx->sx_recurse++; 3754e7f640dSJohn Baldwin atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED); 376b247fd39SMateusz Guzik break; 3775c5df0d9SMateusz Guzik } 378b247fd39SMateusz Guzik rval = 0; 379b247fd39SMateusz Guzik break; 3805c5df0d9SMateusz Guzik } 3815c5df0d9SMateusz Guzik 3824e7f640dSJohn Baldwin LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line); 3834e7f640dSJohn Baldwin if (rval) { 3844e7f640dSJohn Baldwin WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, 3854e7f640dSJohn Baldwin file, line); 3865c5df0d9SMateusz Guzik if (!recursed) 387de2c95ccSMark Johnston LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, 388de2c95ccSMark Johnston sx, 0, 0, file, line, LOCKSTAT_WRITER); 389ce1c953eSMark Johnston TD_LOCKS_INC(curthread); 3905f36700aSJohn Baldwin } 3914e7f640dSJohn Baldwin 3924e7f640dSJohn Baldwin return (rval); 3935f36700aSJohn Baldwin } 3945f36700aSJohn Baldwin 395013c0b49SMateusz Guzik int 396013c0b49SMateusz Guzik sx_try_xlock_(struct sx *sx, const char *file, int line) 397013c0b49SMateusz Guzik { 398013c0b49SMateusz Guzik 399013c0b49SMateusz Guzik return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG)); 400013c0b49SMateusz Guzik } 401013c0b49SMateusz Guzik 4026281b30aSJason Evans void 40319284646SJohn Baldwin _sx_xunlock(struct sx *sx, const char *file, int line) 4046281b30aSJason Evans { 4056281b30aSJason Evans 4060026c92cSJohn Baldwin KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, 4070026c92cSJohn Baldwin ("sx_xunlock() of destroyed sx @ %s:%d", file, line)); 4087ec137e5SJohn Baldwin _sx_assert(sx, SA_XLOCKED, file, line); 409aa89d8cdSJohn Baldwin WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line); 4104e7f640dSJohn Baldwin LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file, 4114e7f640dSJohn Baldwin line); 4120108a980SMateusz Guzik #if LOCK_DEBUG > 0 4136ebb77b6SMateusz Guzik _sx_xunlock_hard(sx, (uintptr_t)curthread, file, line); 414ffd5c94cSMateusz Guzik #else 415ffd5c94cSMateusz Guzik __sx_xunlock(sx, curthread, file, line); 416ffd5c94cSMateusz Guzik #endif 417ce1c953eSMark Johnston TD_LOCKS_DEC(curthread); 4186281b30aSJason Evans } 419d55229b7SJason Evans 4204e7f640dSJohn Baldwin /* 4214e7f640dSJohn Baldwin * Try to do a non-blocking upgrade from a shared lock to an exclusive lock. 4224e7f640dSJohn Baldwin * This will only succeed if this thread holds a single shared lock. 4234e7f640dSJohn Baldwin * Return 1 if if the upgrade succeed, 0 otherwise. 4244e7f640dSJohn Baldwin */ 425d55229b7SJason Evans int 426013c0b49SMateusz Guzik sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF) 427d55229b7SJason Evans { 4284e7f640dSJohn Baldwin uintptr_t x; 429a8e747c5SMateusz Guzik uintptr_t waiters; 4304e7f640dSJohn Baldwin int success; 431d55229b7SJason Evans 43235370593SAndriy Gapon if (SCHEDULER_STOPPED()) 43335370593SAndriy Gapon return (1); 43435370593SAndriy Gapon 4350026c92cSJohn Baldwin KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, 4360026c92cSJohn Baldwin ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line)); 4377ec137e5SJohn Baldwin _sx_assert(sx, SA_SLOCKED, file, line); 438d55229b7SJason Evans 4394e7f640dSJohn Baldwin /* 4404e7f640dSJohn Baldwin * Try to switch from one shared lock to an exclusive lock. We need 4414e7f640dSJohn Baldwin * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that 4424e7f640dSJohn Baldwin * we will wake up the exclusive waiters when we drop the lock. 4434e7f640dSJohn Baldwin */ 444a8e747c5SMateusz Guzik success = 0; 445a8e747c5SMateusz Guzik x = SX_READ_VALUE(sx); 446a8e747c5SMateusz Guzik for (;;) { 447a8e747c5SMateusz Guzik if (SX_SHARERS(x) > 1) 448a8e747c5SMateusz Guzik break; 4492466d12bSMateusz Guzik waiters = (x & SX_LOCK_WAITERS); 450a8e747c5SMateusz Guzik if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, 451a8e747c5SMateusz Guzik (uintptr_t)curthread | waiters)) { 452a8e747c5SMateusz Guzik success = 1; 453a8e747c5SMateusz Guzik break; 454a8e747c5SMateusz Guzik } 455a8e747c5SMateusz Guzik } 4564e7f640dSJohn Baldwin LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line); 457a5aedd68SStacey Son if (success) { 4582466d12bSMateusz Guzik curthread->td_sx_slocks--; 459aa89d8cdSJohn Baldwin WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, 460b0b7cb50SJohn Baldwin file, line); 46132cd0147SMark Johnston LOCKSTAT_RECORD0(sx__upgrade, sx); 462a5aedd68SStacey Son } 4634e7f640dSJohn Baldwin return (success); 464d55229b7SJason Evans } 465d55229b7SJason Evans 466013c0b49SMateusz Guzik int 467013c0b49SMateusz Guzik sx_try_upgrade_(struct sx *sx, const char *file, int line) 468013c0b49SMateusz Guzik { 469013c0b49SMateusz Guzik 470013c0b49SMateusz Guzik return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG)); 471013c0b49SMateusz Guzik } 472013c0b49SMateusz Guzik 4734e7f640dSJohn Baldwin /* 4744e7f640dSJohn Baldwin * Downgrade an unrecursed exclusive lock into a single shared lock. 4754e7f640dSJohn Baldwin */ 476d55229b7SJason Evans void 477013c0b49SMateusz Guzik sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF) 478d55229b7SJason Evans { 4794e7f640dSJohn Baldwin uintptr_t x; 480da7bbd2cSJohn Baldwin int wakeup_swapper; 481d55229b7SJason Evans 48235370593SAndriy Gapon if (SCHEDULER_STOPPED()) 48335370593SAndriy Gapon return; 48435370593SAndriy Gapon 4850026c92cSJohn Baldwin KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, 4860026c92cSJohn Baldwin ("sx_downgrade() of destroyed sx @ %s:%d", file, line)); 4877ec137e5SJohn Baldwin _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line); 4884e7f640dSJohn Baldwin #ifndef INVARIANTS 4894e7f640dSJohn Baldwin if (sx_recursed(sx)) 4904e7f640dSJohn Baldwin panic("downgrade of a recursed lock"); 4914e7f640dSJohn Baldwin #endif 492d55229b7SJason Evans 493aa89d8cdSJohn Baldwin WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line); 494d55229b7SJason Evans 4954e7f640dSJohn Baldwin /* 4964e7f640dSJohn Baldwin * Try to switch from an exclusive lock with no shared waiters 4974e7f640dSJohn Baldwin * to one sharer with no shared waiters. If there are 4984e7f640dSJohn Baldwin * exclusive waiters, we don't need to lock the sleep queue so 4994e7f640dSJohn Baldwin * long as we preserve the flag. We do one quick try and if 5004e7f640dSJohn Baldwin * that fails we grab the sleepq lock to keep the flags from 5014e7f640dSJohn Baldwin * changing and do it the slow way. 5024e7f640dSJohn Baldwin * 5034e7f640dSJohn Baldwin * We have to lock the sleep queue if there are shared waiters 5044e7f640dSJohn Baldwin * so we can wake them up. 5054e7f640dSJohn Baldwin */ 5064e7f640dSJohn Baldwin x = sx->sx_lock; 5074e7f640dSJohn Baldwin if (!(x & SX_LOCK_SHARED_WAITERS) && 5084e7f640dSJohn Baldwin atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) | 50926d94f99SMark Johnston (x & SX_LOCK_EXCLUSIVE_WAITERS))) 51026d94f99SMark Johnston goto out; 5114e7f640dSJohn Baldwin 5124e7f640dSJohn Baldwin /* 5134e7f640dSJohn Baldwin * Lock the sleep queue so we can read the waiters bits 5144e7f640dSJohn Baldwin * without any races and wakeup any shared waiters. 5154e7f640dSJohn Baldwin */ 5164e7f640dSJohn Baldwin sleepq_lock(&sx->lock_object); 5174e7f640dSJohn Baldwin 5184e7f640dSJohn Baldwin /* 5194e7f640dSJohn Baldwin * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single 5204e7f640dSJohn Baldwin * shared lock. If there are any shared waiters, wake them up. 5214e7f640dSJohn Baldwin */ 522da7bbd2cSJohn Baldwin wakeup_swapper = 0; 5234e7f640dSJohn Baldwin x = sx->sx_lock; 5244e7f640dSJohn Baldwin atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | 5254e7f640dSJohn Baldwin (x & SX_LOCK_EXCLUSIVE_WAITERS)); 5264e7f640dSJohn Baldwin if (x & SX_LOCK_SHARED_WAITERS) 527da7bbd2cSJohn Baldwin wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 528da7bbd2cSJohn Baldwin 0, SQ_SHARED_QUEUE); 5294e7f640dSJohn Baldwin sleepq_release(&sx->lock_object); 530d55229b7SJason Evans 531da7bbd2cSJohn Baldwin if (wakeup_swapper) 532da7bbd2cSJohn Baldwin kick_proc0(); 53326d94f99SMark Johnston 53426d94f99SMark Johnston out: 5352466d12bSMateusz Guzik curthread->td_sx_slocks++; 53626d94f99SMark Johnston LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line); 53726d94f99SMark Johnston LOCKSTAT_RECORD0(sx__downgrade, sx); 5384e7f640dSJohn Baldwin } 539d55229b7SJason Evans 540013c0b49SMateusz Guzik void 541013c0b49SMateusz Guzik sx_downgrade_(struct sx *sx, const char *file, int line) 542013c0b49SMateusz Guzik { 543013c0b49SMateusz Guzik 544013c0b49SMateusz Guzik sx_downgrade_int(sx LOCK_FILE_LINE_ARG); 545013c0b49SMateusz Guzik } 546013c0b49SMateusz Guzik 5472466d12bSMateusz Guzik #ifdef ADAPTIVE_SX 5482466d12bSMateusz Guzik static inline void 5492466d12bSMateusz Guzik sx_drop_critical(uintptr_t x, bool *in_critical, int *extra_work) 5502466d12bSMateusz Guzik { 5512466d12bSMateusz Guzik 5522466d12bSMateusz Guzik if (x & SX_LOCK_WRITE_SPINNER) 5532466d12bSMateusz Guzik return; 5542466d12bSMateusz Guzik if (*in_critical) { 5552466d12bSMateusz Guzik critical_exit(); 5562466d12bSMateusz Guzik *in_critical = false; 5572466d12bSMateusz Guzik (*extra_work)--; 5582466d12bSMateusz Guzik } 5592466d12bSMateusz Guzik } 5602466d12bSMateusz Guzik #else 5612466d12bSMateusz Guzik #define sx_drop_critical(x, in_critical, extra_work) do { } while(0) 5622466d12bSMateusz Guzik #endif 5632466d12bSMateusz Guzik 5644e7f640dSJohn Baldwin /* 5654e7f640dSJohn Baldwin * This function represents the so-called 'hard case' for sx_xlock 5664e7f640dSJohn Baldwin * operation. All 'easy case' failures are redirected to this. Note 5674e7f640dSJohn Baldwin * that ideally this would be a static function, but it needs to be 5684e7f640dSJohn Baldwin * accessible from at least sx.h. 5694e7f640dSJohn Baldwin */ 570f9819486SAttilio Rao int 571013c0b49SMateusz Guzik _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF) 5724e7f640dSJohn Baldwin { 5734e7f640dSJohn Baldwin GIANT_DECLARE; 5742466d12bSMateusz Guzik uintptr_t tid, setx; 5754e7f640dSJohn Baldwin #ifdef ADAPTIVE_SX 576094c148bSMateusz Guzik struct thread *owner; 577d07e22cdSMateusz Guzik u_int i, n, spintries = 0; 5781dce110fSMatt Macy enum { READERS, WRITER } sleep_reason = READERS; 5792466d12bSMateusz Guzik bool in_critical = false; 5804e7f640dSJohn Baldwin #endif 5811723a064SJeff Roberson #ifdef LOCK_PROFILING 5821723a064SJeff Roberson uint64_t waittime = 0; 5831723a064SJeff Roberson int contested = 0; 5841723a064SJeff Roberson #endif 5851723a064SJeff Roberson int error = 0; 58604126895SMateusz Guzik #if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS) 5871ada9041SMateusz Guzik struct lock_delay_arg lda; 5881ada9041SMateusz Guzik #endif 589a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 59061852185SMateusz Guzik u_int sleep_cnt = 0; 591a5aedd68SStacey Son int64_t sleep_time = 0; 592076dd8ebSAndriy Gapon int64_t all_time = 0; 593a5aedd68SStacey Son #endif 594e41d6166SMateusz Guzik #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) 5951dce110fSMatt Macy uintptr_t state = 0; 5962466d12bSMateusz Guzik int doing_lockprof = 0; 597e41d6166SMateusz Guzik #endif 598284194f1SMateusz Guzik int extra_work = 0; 5994e7f640dSJohn Baldwin 600013c0b49SMateusz Guzik tid = (uintptr_t)curthread; 60109bdec20SMateusz Guzik 60209bdec20SMateusz Guzik #ifdef KDTRACE_HOOKS 60309bdec20SMateusz Guzik if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) { 60409bdec20SMateusz Guzik while (x == SX_LOCK_UNLOCKED) { 60509bdec20SMateusz Guzik if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) 60609bdec20SMateusz Guzik goto out_lockstat; 60709bdec20SMateusz Guzik } 60809bdec20SMateusz Guzik extra_work = 1; 6092466d12bSMateusz Guzik doing_lockprof = 1; 61009bdec20SMateusz Guzik all_time -= lockstat_nsecs(&sx->lock_object); 61109bdec20SMateusz Guzik state = x; 61209bdec20SMateusz Guzik } 61309bdec20SMateusz Guzik #endif 61409bdec20SMateusz Guzik #ifdef LOCK_PROFILING 61509bdec20SMateusz Guzik extra_work = 1; 6162466d12bSMateusz Guzik doing_lockprof = 1; 61709bdec20SMateusz Guzik state = x; 61809bdec20SMateusz Guzik #endif 61909bdec20SMateusz Guzik 62035370593SAndriy Gapon if (SCHEDULER_STOPPED()) 62135370593SAndriy Gapon return (0); 62235370593SAndriy Gapon 623c1aaf63cSMateusz Guzik if (__predict_false(x == SX_LOCK_UNLOCKED)) 624c1aaf63cSMateusz Guzik x = SX_READ_VALUE(sx); 625c1aaf63cSMateusz Guzik 6264e7f640dSJohn Baldwin /* If we already hold an exclusive lock, then recurse. */ 627c5f61e6fSMateusz Guzik if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) { 628f0830182SAttilio Rao KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0, 629b0d67325SJohn Baldwin ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n", 630b0d67325SJohn Baldwin sx->lock_object.lo_name, file, line)); 6314e7f640dSJohn Baldwin sx->sx_recurse++; 6324e7f640dSJohn Baldwin atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED); 6334e7f640dSJohn Baldwin if (LOCK_LOG_TEST(&sx->lock_object, 0)) 6344e7f640dSJohn Baldwin CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx); 635f9819486SAttilio Rao return (0); 6364e7f640dSJohn Baldwin } 6374e7f640dSJohn Baldwin 6384e7f640dSJohn Baldwin if (LOCK_LOG_TEST(&sx->lock_object, 0)) 6394e7f640dSJohn Baldwin CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__, 6404e7f640dSJohn Baldwin sx->lock_object.lo_name, (void *)sx->sx_lock, file, line); 6414e7f640dSJohn Baldwin 642*f90d57b8SMateusz Guzik #if defined(ADAPTIVE_SX) 643*f90d57b8SMateusz Guzik lock_delay_arg_init(&lda, &sx_delay); 644*f90d57b8SMateusz Guzik #elif defined(KDTRACE_HOOKS) 645*f90d57b8SMateusz Guzik lock_delay_arg_init_noadapt(&lda); 646*f90d57b8SMateusz Guzik #endif 647*f90d57b8SMateusz Guzik 648ae7d25a4SMateusz Guzik #ifdef HWPMC_HOOKS 649ae7d25a4SMateusz Guzik PMC_SOFT_CALL( , , lock, failed); 650ae7d25a4SMateusz Guzik #endif 651ae7d25a4SMateusz Guzik lock_profile_obtain_lock_failed(&sx->lock_object, &contested, 652ae7d25a4SMateusz Guzik &waittime); 653ae7d25a4SMateusz Guzik 654fb106123SMateusz Guzik #ifndef INVARIANTS 655fb106123SMateusz Guzik GIANT_SAVE(extra_work); 656fb106123SMateusz Guzik #endif 657e41d6166SMateusz Guzik 658fc4f686dSMateusz Guzik for (;;) { 659c5f61e6fSMateusz Guzik if (x == SX_LOCK_UNLOCKED) { 660fa474043SMateusz Guzik if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) 661fc4f686dSMateusz Guzik break; 662c5f61e6fSMateusz Guzik continue; 663c5f61e6fSMateusz Guzik } 664fb106123SMateusz Guzik #ifdef INVARIANTS 665fb106123SMateusz Guzik GIANT_SAVE(extra_work); 666fb106123SMateusz Guzik #endif 667a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 6681ada9041SMateusz Guzik lda.spin_cnt++; 669a5aedd68SStacey Son #endif 6704e7f640dSJohn Baldwin #ifdef ADAPTIVE_SX 671befd3e35SMateusz Guzik if (x == (SX_LOCK_SHARED | SX_LOCK_WRITE_SPINNER)) { 672befd3e35SMateusz Guzik if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) 673befd3e35SMateusz Guzik break; 674befd3e35SMateusz Guzik continue; 675befd3e35SMateusz Guzik } 676befd3e35SMateusz Guzik 6774e7f640dSJohn Baldwin /* 6784e7f640dSJohn Baldwin * If the lock is write locked and the owner is 6794e7f640dSJohn Baldwin * running on another CPU, spin until the owner stops 6804e7f640dSJohn Baldwin * running or the state of the lock changes. 6814e7f640dSJohn Baldwin */ 6821ae1c2a3SAttilio Rao if ((x & SX_LOCK_SHARED) == 0) { 6832466d12bSMateusz Guzik sx_drop_critical(x, &in_critical, &extra_work); 684d94df98cSMateusz Guzik sleep_reason = WRITER; 685c5f61e6fSMateusz Guzik owner = lv_sx_owner(x); 686d94df98cSMateusz Guzik if (!TD_IS_RUNNING(owner)) 687d94df98cSMateusz Guzik goto sleepq; 6884e7f640dSJohn Baldwin if (LOCK_LOG_TEST(&sx->lock_object, 0)) 689d94df98cSMateusz Guzik CTR3(KTR_LOCK, "%s: spinning on %p held by %p", 6904e7f640dSJohn Baldwin __func__, sx, owner); 691d94df98cSMateusz Guzik KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), 692d94df98cSMateusz Guzik "spinning", "lockname:\"%s\"", 6932cba8dd3SJohn Baldwin sx->lock_object.lo_name); 694c5f61e6fSMateusz Guzik do { 6951ada9041SMateusz Guzik lock_delay(&lda); 696c5f61e6fSMateusz Guzik x = SX_READ_VALUE(sx); 697c5f61e6fSMateusz Guzik owner = lv_sx_owner(x); 698d94df98cSMateusz Guzik } while (owner != NULL && TD_IS_RUNNING(owner)); 699d94df98cSMateusz Guzik KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), 700d94df98cSMateusz Guzik "running"); 7014e7f640dSJohn Baldwin continue; 702d94df98cSMateusz Guzik } else if (SX_SHARERS(x) > 0) { 703d94df98cSMateusz Guzik sleep_reason = READERS; 704d94df98cSMateusz Guzik if (spintries == asx_retries) 705d94df98cSMateusz Guzik goto sleepq; 7062466d12bSMateusz Guzik if (!(x & SX_LOCK_WRITE_SPINNER)) { 7072466d12bSMateusz Guzik if (!in_critical) { 7082466d12bSMateusz Guzik critical_enter(); 7092466d12bSMateusz Guzik in_critical = true; 7102466d12bSMateusz Guzik extra_work++; 7112466d12bSMateusz Guzik } 7122466d12bSMateusz Guzik if (!atomic_fcmpset_ptr(&sx->sx_lock, &x, 7132466d12bSMateusz Guzik x | SX_LOCK_WRITE_SPINNER)) { 7142466d12bSMateusz Guzik critical_exit(); 7152466d12bSMateusz Guzik in_critical = false; 7162466d12bSMateusz Guzik extra_work--; 7172466d12bSMateusz Guzik continue; 7182466d12bSMateusz Guzik } 7192466d12bSMateusz Guzik } 7201ae1c2a3SAttilio Rao spintries++; 721d94df98cSMateusz Guzik KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), 722d94df98cSMateusz Guzik "spinning", "lockname:\"%s\"", 723d94df98cSMateusz Guzik sx->lock_object.lo_name); 724d07e22cdSMateusz Guzik n = SX_SHARERS(x); 7252466d12bSMateusz Guzik for (i = 0; i < asx_loops; i += n) { 726d07e22cdSMateusz Guzik lock_delay_spin(n); 72720a15d17SMateusz Guzik x = SX_READ_VALUE(sx); 7282466d12bSMateusz Guzik if (!(x & SX_LOCK_WRITE_SPINNER)) 7292466d12bSMateusz Guzik break; 7302466d12bSMateusz Guzik if (!(x & SX_LOCK_SHARED)) 7312466d12bSMateusz Guzik break; 7322466d12bSMateusz Guzik n = SX_SHARERS(x); 7332466d12bSMateusz Guzik if (n == 0) 7341ae1c2a3SAttilio Rao break; 7351ae1c2a3SAttilio Rao } 73620a15d17SMateusz Guzik #ifdef KDTRACE_HOOKS 73720a15d17SMateusz Guzik lda.spin_cnt += i; 73820a15d17SMateusz Guzik #endif 739d94df98cSMateusz Guzik KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), 740d94df98cSMateusz Guzik "running"); 741efa9f177SMateusz Guzik if (i < asx_loops) 7421ae1c2a3SAttilio Rao continue; 7431ae1c2a3SAttilio Rao } 744fb106123SMateusz Guzik sleepq: 745cde25ed4SMateusz Guzik #endif 7464e7f640dSJohn Baldwin sleepq_lock(&sx->lock_object); 747c5f61e6fSMateusz Guzik x = SX_READ_VALUE(sx); 74893118b62SMateusz Guzik retry_sleepq: 7494e7f640dSJohn Baldwin 7504e7f640dSJohn Baldwin /* 7514e7f640dSJohn Baldwin * If the lock was released while spinning on the 7524e7f640dSJohn Baldwin * sleep queue chain lock, try again. 7534e7f640dSJohn Baldwin */ 7544e7f640dSJohn Baldwin if (x == SX_LOCK_UNLOCKED) { 7554e7f640dSJohn Baldwin sleepq_release(&sx->lock_object); 7562466d12bSMateusz Guzik sx_drop_critical(x, &in_critical, &extra_work); 7574e7f640dSJohn Baldwin continue; 7584e7f640dSJohn Baldwin } 7594e7f640dSJohn Baldwin 7604e7f640dSJohn Baldwin #ifdef ADAPTIVE_SX 7614e7f640dSJohn Baldwin /* 7624e7f640dSJohn Baldwin * The current lock owner might have started executing 7634e7f640dSJohn Baldwin * on another CPU (or the lock could have changed 7644e7f640dSJohn Baldwin * owners) while we were waiting on the sleep queue 7654e7f640dSJohn Baldwin * chain lock. If so, drop the sleep queue lock and try 7664e7f640dSJohn Baldwin * again. 7674e7f640dSJohn Baldwin */ 76828f1a9e3SMateusz Guzik if (!(x & SX_LOCK_SHARED)) { 7694e7f640dSJohn Baldwin owner = (struct thread *)SX_OWNER(x); 7704e7f640dSJohn Baldwin if (TD_IS_RUNNING(owner)) { 7714e7f640dSJohn Baldwin sleepq_release(&sx->lock_object); 7722466d12bSMateusz Guzik sx_drop_critical(x, &in_critical, 7732466d12bSMateusz Guzik &extra_work); 7744e7f640dSJohn Baldwin continue; 7754e7f640dSJohn Baldwin } 776d94df98cSMateusz Guzik } else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) { 77728f1a9e3SMateusz Guzik sleepq_release(&sx->lock_object); 7782466d12bSMateusz Guzik sx_drop_critical(x, &in_critical, &extra_work); 77928f1a9e3SMateusz Guzik continue; 78028f1a9e3SMateusz Guzik } 7814e7f640dSJohn Baldwin #endif 7824e7f640dSJohn Baldwin 7834e7f640dSJohn Baldwin /* 7844e7f640dSJohn Baldwin * If an exclusive lock was released with both shared 7854e7f640dSJohn Baldwin * and exclusive waiters and a shared waiter hasn't 7864e7f640dSJohn Baldwin * woken up and acquired the lock yet, sx_lock will be 7874e7f640dSJohn Baldwin * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS. 7884e7f640dSJohn Baldwin * If we see that value, try to acquire it once. Note 7894e7f640dSJohn Baldwin * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS 7904e7f640dSJohn Baldwin * as there are other exclusive waiters still. If we 7914e7f640dSJohn Baldwin * fail, restart the loop. 7924e7f640dSJohn Baldwin */ 7932466d12bSMateusz Guzik setx = x & (SX_LOCK_WAITERS | SX_LOCK_WRITE_SPINNER); 7942466d12bSMateusz Guzik if ((x & ~setx) == SX_LOCK_SHARED) { 7952466d12bSMateusz Guzik setx &= ~SX_LOCK_WRITE_SPINNER; 7962466d12bSMateusz Guzik if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid | setx)) 79793118b62SMateusz Guzik goto retry_sleepq; 7984e7f640dSJohn Baldwin sleepq_release(&sx->lock_object); 7994e7f640dSJohn Baldwin CTR2(KTR_LOCK, "%s: %p claimed by new writer", 8004e7f640dSJohn Baldwin __func__, sx); 8014e7f640dSJohn Baldwin break; 8024e7f640dSJohn Baldwin } 8034e7f640dSJohn Baldwin 8042466d12bSMateusz Guzik #ifdef ADAPTIVE_SX 8052466d12bSMateusz Guzik /* 8062466d12bSMateusz Guzik * It is possible we set the SX_LOCK_WRITE_SPINNER bit. 8072466d12bSMateusz Guzik * It is an invariant that when the bit is set, there is 8082466d12bSMateusz Guzik * a writer ready to grab the lock. Thus clear the bit since 8092466d12bSMateusz Guzik * we are going to sleep. 8102466d12bSMateusz Guzik */ 8112466d12bSMateusz Guzik if (in_critical) { 8122466d12bSMateusz Guzik if ((x & SX_LOCK_WRITE_SPINNER) || 8132466d12bSMateusz Guzik !((x & SX_LOCK_EXCLUSIVE_WAITERS))) { 8142466d12bSMateusz Guzik setx = x & ~SX_LOCK_WRITE_SPINNER; 8152466d12bSMateusz Guzik setx |= SX_LOCK_EXCLUSIVE_WAITERS; 8162466d12bSMateusz Guzik if (!atomic_fcmpset_ptr(&sx->sx_lock, &x, 8172466d12bSMateusz Guzik setx)) { 8182466d12bSMateusz Guzik goto retry_sleepq; 8192466d12bSMateusz Guzik } 8202466d12bSMateusz Guzik } 8212466d12bSMateusz Guzik critical_exit(); 8222466d12bSMateusz Guzik in_critical = false; 8232466d12bSMateusz Guzik } else { 8242466d12bSMateusz Guzik #endif 8254e7f640dSJohn Baldwin /* 8264e7f640dSJohn Baldwin * Try to set the SX_LOCK_EXCLUSIVE_WAITERS. If we fail, 8274e7f640dSJohn Baldwin * than loop back and retry. 8284e7f640dSJohn Baldwin */ 8294e7f640dSJohn Baldwin if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) { 83093118b62SMateusz Guzik if (!atomic_fcmpset_ptr(&sx->sx_lock, &x, 8314e7f640dSJohn Baldwin x | SX_LOCK_EXCLUSIVE_WAITERS)) { 83293118b62SMateusz Guzik goto retry_sleepq; 8334e7f640dSJohn Baldwin } 8344e7f640dSJohn Baldwin if (LOCK_LOG_TEST(&sx->lock_object, 0)) 8354e7f640dSJohn Baldwin CTR2(KTR_LOCK, "%s: %p set excl waiters flag", 8364e7f640dSJohn Baldwin __func__, sx); 8374e7f640dSJohn Baldwin } 8382466d12bSMateusz Guzik #ifdef ADAPTIVE_SX 8392466d12bSMateusz Guzik } 8402466d12bSMateusz Guzik #endif 8414e7f640dSJohn Baldwin 8424e7f640dSJohn Baldwin /* 8434e7f640dSJohn Baldwin * Since we have been unable to acquire the exclusive 8444e7f640dSJohn Baldwin * lock and the exclusive waiters flag is set, we have 8454e7f640dSJohn Baldwin * to sleep. 8464e7f640dSJohn Baldwin */ 8474e7f640dSJohn Baldwin if (LOCK_LOG_TEST(&sx->lock_object, 0)) 8484e7f640dSJohn Baldwin CTR2(KTR_LOCK, "%s: %p blocking on sleep queue", 8494e7f640dSJohn Baldwin __func__, sx); 8504e7f640dSJohn Baldwin 851a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 852e2b25737SMark Johnston sleep_time -= lockstat_nsecs(&sx->lock_object); 853a5aedd68SStacey Son #endif 8544e7f640dSJohn Baldwin sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name, 855f9819486SAttilio Rao SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ? 856f9819486SAttilio Rao SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE); 857f9819486SAttilio Rao if (!(opts & SX_INTERRUPTIBLE)) 858c5aa6b58SJeff Roberson sleepq_wait(&sx->lock_object, 0); 859f9819486SAttilio Rao else 860c5aa6b58SJeff Roberson error = sleepq_wait_sig(&sx->lock_object, 0); 861a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 862e2b25737SMark Johnston sleep_time += lockstat_nsecs(&sx->lock_object); 863a5aedd68SStacey Son sleep_cnt++; 864a5aedd68SStacey Son #endif 865f9819486SAttilio Rao if (error) { 866f9819486SAttilio Rao if (LOCK_LOG_TEST(&sx->lock_object, 0)) 867f9819486SAttilio Rao CTR2(KTR_LOCK, 868f9819486SAttilio Rao "%s: interruptible sleep by %p suspended by signal", 869f9819486SAttilio Rao __func__, sx); 870f9819486SAttilio Rao break; 871f9819486SAttilio Rao } 8724e7f640dSJohn Baldwin if (LOCK_LOG_TEST(&sx->lock_object, 0)) 8734e7f640dSJohn Baldwin CTR2(KTR_LOCK, "%s: %p resuming from sleep queue", 8744e7f640dSJohn Baldwin __func__, sx); 875c5f61e6fSMateusz Guzik x = SX_READ_VALUE(sx); 8764e7f640dSJohn Baldwin } 877e41d6166SMateusz Guzik if (__predict_true(!extra_work)) 878e41d6166SMateusz Guzik return (error); 8792466d12bSMateusz Guzik #ifdef ADAPTIVE_SX 8802466d12bSMateusz Guzik if (in_critical) 8812466d12bSMateusz Guzik critical_exit(); 8822466d12bSMateusz Guzik #endif 883ee252fc9SMateusz Guzik GIANT_RESTORE(); 8842466d12bSMateusz Guzik #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) 8852466d12bSMateusz Guzik if (__predict_true(!doing_lockprof)) 8862466d12bSMateusz Guzik return (error); 887e41d6166SMateusz Guzik #endif 888076dd8ebSAndriy Gapon #ifdef KDTRACE_HOOKS 889e2b25737SMark Johnston all_time += lockstat_nsecs(&sx->lock_object); 890076dd8ebSAndriy Gapon if (sleep_time) 89132cd0147SMark Johnston LOCKSTAT_RECORD4(sx__block, sx, sleep_time, 892076dd8ebSAndriy Gapon LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0, 893076dd8ebSAndriy Gapon (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); 8941ada9041SMateusz Guzik if (lda.spin_cnt > sleep_cnt) 89532cd0147SMark Johnston LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time, 896076dd8ebSAndriy Gapon LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0, 897076dd8ebSAndriy Gapon (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); 89809bdec20SMateusz Guzik out_lockstat: 899076dd8ebSAndriy Gapon #endif 900f9819486SAttilio Rao if (!error) 901de2c95ccSMark Johnston LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, 902de2c95ccSMark Johnston contested, waittime, file, line, LOCKSTAT_WRITER); 903f9819486SAttilio Rao return (error); 9044e7f640dSJohn Baldwin } 9054e7f640dSJohn Baldwin 9064e7f640dSJohn Baldwin /* 9074e7f640dSJohn Baldwin * This function represents the so-called 'hard case' for sx_xunlock 9084e7f640dSJohn Baldwin * operation. All 'easy case' failures are redirected to this. Note 9094e7f640dSJohn Baldwin * that ideally this would be a static function, but it needs to be 9104e7f640dSJohn Baldwin * accessible from at least sx.h. 9114e7f640dSJohn Baldwin */ 9124e7f640dSJohn Baldwin void 913b584eb2eSMateusz Guzik _sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF) 9144e7f640dSJohn Baldwin { 915b584eb2eSMateusz Guzik uintptr_t tid, setx; 916da7bbd2cSJohn Baldwin int queue, wakeup_swapper; 9174e7f640dSJohn Baldwin 91835370593SAndriy Gapon if (SCHEDULER_STOPPED()) 91935370593SAndriy Gapon return; 92035370593SAndriy Gapon 921b584eb2eSMateusz Guzik tid = (uintptr_t)curthread; 9224e7f640dSJohn Baldwin 923b584eb2eSMateusz Guzik if (__predict_false(x == tid)) 9243b3cf014SMateusz Guzik x = SX_READ_VALUE(sx); 925b584eb2eSMateusz Guzik 926b584eb2eSMateusz Guzik MPASS(!(x & SX_LOCK_SHARED)); 927b584eb2eSMateusz Guzik 928b584eb2eSMateusz Guzik if (__predict_false(x & SX_LOCK_RECURSED)) { 9296ebb77b6SMateusz Guzik /* The lock is recursed, unrecurse one level. */ 9304e7f640dSJohn Baldwin if ((--sx->sx_recurse) == 0) 9314e7f640dSJohn Baldwin atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED); 9324e7f640dSJohn Baldwin if (LOCK_LOG_TEST(&sx->lock_object, 0)) 9334e7f640dSJohn Baldwin CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx); 9344e7f640dSJohn Baldwin return; 9354e7f640dSJohn Baldwin } 9363b3cf014SMateusz Guzik 9373b3cf014SMateusz Guzik LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER); 9383b3cf014SMateusz Guzik if (x == tid && 9393b3cf014SMateusz Guzik atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED)) 9403b3cf014SMateusz Guzik return; 9413b3cf014SMateusz Guzik 9424e7f640dSJohn Baldwin if (LOCK_LOG_TEST(&sx->lock_object, 0)) 9434e7f640dSJohn Baldwin CTR2(KTR_LOCK, "%s: %p contested", __func__, sx); 9444e7f640dSJohn Baldwin 9454e7f640dSJohn Baldwin sleepq_lock(&sx->lock_object); 946bc24577cSMateusz Guzik x = SX_READ_VALUE(sx); 9472d96bd88SMateusz Guzik MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)); 9484e7f640dSJohn Baldwin 9494e7f640dSJohn Baldwin /* 9504e7f640dSJohn Baldwin * The wake up algorithm here is quite simple and probably not 9514e7f640dSJohn Baldwin * ideal. It gives precedence to shared waiters if they are 9524e7f640dSJohn Baldwin * present. For this condition, we have to preserve the 9534e7f640dSJohn Baldwin * state of the exclusive waiters flag. 9542028867dSAttilio Rao * If interruptible sleeps left the shared queue empty avoid a 9552028867dSAttilio Rao * starvation for the threads sleeping on the exclusive queue by giving 9562028867dSAttilio Rao * them precedence and cleaning up the shared waiters bit anyway. 9574e7f640dSJohn Baldwin */ 958bc24577cSMateusz Guzik setx = SX_LOCK_UNLOCKED; 9594e7f640dSJohn Baldwin queue = SQ_SHARED_QUEUE; 9602466d12bSMateusz Guzik if ((x & SX_LOCK_EXCLUSIVE_WAITERS) != 0 && 9612466d12bSMateusz Guzik sleepq_sleepcnt(&sx->lock_object, SQ_EXCLUSIVE_QUEUE) != 0) { 9622466d12bSMateusz Guzik queue = SQ_EXCLUSIVE_QUEUE; 9632466d12bSMateusz Guzik setx |= (x & SX_LOCK_SHARED_WAITERS); 964bc24577cSMateusz Guzik } 965bc24577cSMateusz Guzik atomic_store_rel_ptr(&sx->sx_lock, setx); 9664e7f640dSJohn Baldwin 9674e7f640dSJohn Baldwin /* Wake up all the waiters for the specific queue. */ 9684e7f640dSJohn Baldwin if (LOCK_LOG_TEST(&sx->lock_object, 0)) 9694e7f640dSJohn Baldwin CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue", 9704e7f640dSJohn Baldwin __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" : 9714e7f640dSJohn Baldwin "exclusive"); 972bc24577cSMateusz Guzik 973da7bbd2cSJohn Baldwin wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, 974da7bbd2cSJohn Baldwin queue); 975c5aa6b58SJeff Roberson sleepq_release(&sx->lock_object); 976da7bbd2cSJohn Baldwin if (wakeup_swapper) 977da7bbd2cSJohn Baldwin kick_proc0(); 9784e7f640dSJohn Baldwin } 9794e7f640dSJohn Baldwin 980834f70f3SMateusz Guzik static bool __always_inline 9812466d12bSMateusz Guzik __sx_can_read(struct thread *td, uintptr_t x, bool fp) 9822466d12bSMateusz Guzik { 9832466d12bSMateusz Guzik 9842466d12bSMateusz Guzik if ((x & (SX_LOCK_SHARED | SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_WRITE_SPINNER)) 9852466d12bSMateusz Guzik == SX_LOCK_SHARED) 9862466d12bSMateusz Guzik return (true); 9872466d12bSMateusz Guzik if (!fp && td->td_sx_slocks && (x & SX_LOCK_SHARED)) 9882466d12bSMateusz Guzik return (true); 9892466d12bSMateusz Guzik return (false); 9902466d12bSMateusz Guzik } 9912466d12bSMateusz Guzik 9922466d12bSMateusz Guzik static bool __always_inline 9932466d12bSMateusz Guzik __sx_slock_try(struct sx *sx, struct thread *td, uintptr_t *xp, bool fp 9942466d12bSMateusz Guzik LOCK_FILE_LINE_ARG_DEF) 995834f70f3SMateusz Guzik { 996834f70f3SMateusz Guzik 997834f70f3SMateusz Guzik /* 998834f70f3SMateusz Guzik * If no other thread has an exclusive lock then try to bump up 999834f70f3SMateusz Guzik * the count of sharers. Since we have to preserve the state 1000834f70f3SMateusz Guzik * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the 1001834f70f3SMateusz Guzik * shared lock loop back and retry. 1002834f70f3SMateusz Guzik */ 10032466d12bSMateusz Guzik while (__sx_can_read(td, *xp, fp)) { 1004834f70f3SMateusz Guzik if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp, 1005834f70f3SMateusz Guzik *xp + SX_ONE_SHARER)) { 1006834f70f3SMateusz Guzik if (LOCK_LOG_TEST(&sx->lock_object, 0)) 1007834f70f3SMateusz Guzik CTR4(KTR_LOCK, "%s: %p succeed %p -> %p", 1008834f70f3SMateusz Guzik __func__, sx, (void *)*xp, 1009834f70f3SMateusz Guzik (void *)(*xp + SX_ONE_SHARER)); 10102466d12bSMateusz Guzik td->td_sx_slocks++; 1011834f70f3SMateusz Guzik return (true); 1012834f70f3SMateusz Guzik } 1013834f70f3SMateusz Guzik } 1014834f70f3SMateusz Guzik return (false); 1015834f70f3SMateusz Guzik } 1016834f70f3SMateusz Guzik 1017834f70f3SMateusz Guzik static int __noinline 1018013c0b49SMateusz Guzik _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF) 10194e7f640dSJohn Baldwin { 10204e7f640dSJohn Baldwin GIANT_DECLARE; 10212466d12bSMateusz Guzik struct thread *td; 10224e7f640dSJohn Baldwin #ifdef ADAPTIVE_SX 1023094c148bSMateusz Guzik struct thread *owner; 10242466d12bSMateusz Guzik u_int i, n, spintries = 0; 10254e7f640dSJohn Baldwin #endif 10261723a064SJeff Roberson #ifdef LOCK_PROFILING 1027c1a6d9faSAttilio Rao uint64_t waittime = 0; 1028c1a6d9faSAttilio Rao int contested = 0; 10291723a064SJeff Roberson #endif 1030c1a6d9faSAttilio Rao int error = 0; 103104126895SMateusz Guzik #if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS) 10321ada9041SMateusz Guzik struct lock_delay_arg lda; 10331ada9041SMateusz Guzik #endif 1034a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 103561852185SMateusz Guzik u_int sleep_cnt = 0; 1036a5aedd68SStacey Son int64_t sleep_time = 0; 1037076dd8ebSAndriy Gapon int64_t all_time = 0; 1038a5aedd68SStacey Son #endif 1039e41d6166SMateusz Guzik #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) 10401dce110fSMatt Macy uintptr_t state = 0; 1041e41d6166SMateusz Guzik #endif 1042284194f1SMateusz Guzik int extra_work = 0; 1043c1a6d9faSAttilio Rao 10442466d12bSMateusz Guzik td = curthread; 10452466d12bSMateusz Guzik 104609bdec20SMateusz Guzik #ifdef KDTRACE_HOOKS 104709bdec20SMateusz Guzik if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) { 10482466d12bSMateusz Guzik if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG)) 104909bdec20SMateusz Guzik goto out_lockstat; 105009bdec20SMateusz Guzik extra_work = 1; 105109bdec20SMateusz Guzik all_time -= lockstat_nsecs(&sx->lock_object); 105209bdec20SMateusz Guzik state = x; 105309bdec20SMateusz Guzik } 105409bdec20SMateusz Guzik #endif 105509bdec20SMateusz Guzik #ifdef LOCK_PROFILING 105609bdec20SMateusz Guzik extra_work = 1; 105709bdec20SMateusz Guzik state = x; 105809bdec20SMateusz Guzik #endif 105909bdec20SMateusz Guzik 106035370593SAndriy Gapon if (SCHEDULER_STOPPED()) 106135370593SAndriy Gapon return (0); 106235370593SAndriy Gapon 1063fa5000a4SMateusz Guzik #if defined(ADAPTIVE_SX) 10641ada9041SMateusz Guzik lock_delay_arg_init(&lda, &sx_delay); 1065fa5000a4SMateusz Guzik #elif defined(KDTRACE_HOOKS) 1066c795344fSMateusz Guzik lock_delay_arg_init_noadapt(&lda); 10671ada9041SMateusz Guzik #endif 1068e41d6166SMateusz Guzik 1069ae7d25a4SMateusz Guzik #ifdef HWPMC_HOOKS 1070ae7d25a4SMateusz Guzik PMC_SOFT_CALL( , , lock, failed); 1071ae7d25a4SMateusz Guzik #endif 1072ae7d25a4SMateusz Guzik lock_profile_obtain_lock_failed(&sx->lock_object, &contested, 1073ae7d25a4SMateusz Guzik &waittime); 1074ae7d25a4SMateusz Guzik 1075fb106123SMateusz Guzik #ifndef INVARIANTS 1076fb106123SMateusz Guzik GIANT_SAVE(extra_work); 1077fb106123SMateusz Guzik #endif 1078076dd8ebSAndriy Gapon 10794e7f640dSJohn Baldwin /* 10804e7f640dSJohn Baldwin * As with rwlocks, we don't make any attempt to try to block 10814e7f640dSJohn Baldwin * shared locks once there is an exclusive waiter. 10824e7f640dSJohn Baldwin */ 10834e7f640dSJohn Baldwin for (;;) { 10842466d12bSMateusz Guzik if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG)) 10854e7f640dSJohn Baldwin break; 1086fb106123SMateusz Guzik #ifdef INVARIANTS 1087fb106123SMateusz Guzik GIANT_SAVE(extra_work); 1088fb106123SMateusz Guzik #endif 1089c5f61e6fSMateusz Guzik #ifdef KDTRACE_HOOKS 1090c5f61e6fSMateusz Guzik lda.spin_cnt++; 1091c5f61e6fSMateusz Guzik #endif 1092c5f61e6fSMateusz Guzik 10934e7f640dSJohn Baldwin #ifdef ADAPTIVE_SX 10944e7f640dSJohn Baldwin /* 10954e7f640dSJohn Baldwin * If the owner is running on another CPU, spin until 10964e7f640dSJohn Baldwin * the owner stops running or the state of the lock 10974e7f640dSJohn Baldwin * changes. 10984e7f640dSJohn Baldwin */ 10992466d12bSMateusz Guzik if ((x & SX_LOCK_SHARED) == 0) { 1100c5f61e6fSMateusz Guzik owner = lv_sx_owner(x); 11014e7f640dSJohn Baldwin if (TD_IS_RUNNING(owner)) { 11024e7f640dSJohn Baldwin if (LOCK_LOG_TEST(&sx->lock_object, 0)) 11034e7f640dSJohn Baldwin CTR3(KTR_LOCK, 11044e7f640dSJohn Baldwin "%s: spinning on %p held by %p", 11054e7f640dSJohn Baldwin __func__, sx, owner); 11062cba8dd3SJohn Baldwin KTR_STATE1(KTR_SCHED, "thread", 11072cba8dd3SJohn Baldwin sched_tdname(curthread), "spinning", 11082cba8dd3SJohn Baldwin "lockname:\"%s\"", sx->lock_object.lo_name); 1109c5f61e6fSMateusz Guzik do { 11101ada9041SMateusz Guzik lock_delay(&lda); 1111c5f61e6fSMateusz Guzik x = SX_READ_VALUE(sx); 1112c5f61e6fSMateusz Guzik owner = lv_sx_owner(x); 1113c5f61e6fSMateusz Guzik } while (owner != NULL && TD_IS_RUNNING(owner)); 11142cba8dd3SJohn Baldwin KTR_STATE0(KTR_SCHED, "thread", 11152cba8dd3SJohn Baldwin sched_tdname(curthread), "running"); 11164e7f640dSJohn Baldwin continue; 11174e7f640dSJohn Baldwin } 11182466d12bSMateusz Guzik } else { 11192466d12bSMateusz Guzik if ((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) { 11202466d12bSMateusz Guzik MPASS(!__sx_can_read(td, x, false)); 11212466d12bSMateusz Guzik lock_delay_spin(2); 11222466d12bSMateusz Guzik x = SX_READ_VALUE(sx); 11232466d12bSMateusz Guzik continue; 11242466d12bSMateusz Guzik } 11252466d12bSMateusz Guzik if (spintries < asx_retries) { 11262466d12bSMateusz Guzik KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), 11272466d12bSMateusz Guzik "spinning", "lockname:\"%s\"", 11282466d12bSMateusz Guzik sx->lock_object.lo_name); 11292466d12bSMateusz Guzik n = SX_SHARERS(x); 11302466d12bSMateusz Guzik for (i = 0; i < asx_loops; i += n) { 11312466d12bSMateusz Guzik lock_delay_spin(n); 11322466d12bSMateusz Guzik x = SX_READ_VALUE(sx); 11332466d12bSMateusz Guzik if (!(x & SX_LOCK_SHARED)) 11342466d12bSMateusz Guzik break; 11352466d12bSMateusz Guzik n = SX_SHARERS(x); 11362466d12bSMateusz Guzik if (n == 0) 11372466d12bSMateusz Guzik break; 11382466d12bSMateusz Guzik if (__sx_can_read(td, x, false)) 11392466d12bSMateusz Guzik break; 11402466d12bSMateusz Guzik } 11412466d12bSMateusz Guzik #ifdef KDTRACE_HOOKS 11422466d12bSMateusz Guzik lda.spin_cnt += i; 11432466d12bSMateusz Guzik #endif 11442466d12bSMateusz Guzik KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), 11452466d12bSMateusz Guzik "running"); 11462466d12bSMateusz Guzik if (i < asx_loops) 11472466d12bSMateusz Guzik continue; 11482466d12bSMateusz Guzik } 11492466d12bSMateusz Guzik } 11504e7f640dSJohn Baldwin #endif 11514e7f640dSJohn Baldwin 11524e7f640dSJohn Baldwin /* 11534e7f640dSJohn Baldwin * Some other thread already has an exclusive lock, so 11544e7f640dSJohn Baldwin * start the process of blocking. 11554e7f640dSJohn Baldwin */ 11564e7f640dSJohn Baldwin sleepq_lock(&sx->lock_object); 1157c5f61e6fSMateusz Guzik x = SX_READ_VALUE(sx); 115893118b62SMateusz Guzik retry_sleepq: 11592466d12bSMateusz Guzik if (((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) || 11602466d12bSMateusz Guzik __sx_can_read(td, x, false)) { 11614e7f640dSJohn Baldwin sleepq_release(&sx->lock_object); 11624e7f640dSJohn Baldwin continue; 11634e7f640dSJohn Baldwin } 11644e7f640dSJohn Baldwin 11654e7f640dSJohn Baldwin #ifdef ADAPTIVE_SX 11664e7f640dSJohn Baldwin /* 11674e7f640dSJohn Baldwin * If the owner is running on another CPU, spin until 11684e7f640dSJohn Baldwin * the owner stops running or the state of the lock 11694e7f640dSJohn Baldwin * changes. 11704e7f640dSJohn Baldwin */ 1171f26db694SMateusz Guzik if (!(x & SX_LOCK_SHARED)) { 11724e7f640dSJohn Baldwin owner = (struct thread *)SX_OWNER(x); 11734e7f640dSJohn Baldwin if (TD_IS_RUNNING(owner)) { 11744e7f640dSJohn Baldwin sleepq_release(&sx->lock_object); 1175c5f61e6fSMateusz Guzik x = SX_READ_VALUE(sx); 11764e7f640dSJohn Baldwin continue; 11774e7f640dSJohn Baldwin } 11784e7f640dSJohn Baldwin } 11794e7f640dSJohn Baldwin #endif 11804e7f640dSJohn Baldwin 11814e7f640dSJohn Baldwin /* 11824e7f640dSJohn Baldwin * Try to set the SX_LOCK_SHARED_WAITERS flag. If we 11834e7f640dSJohn Baldwin * fail to set it drop the sleep queue lock and loop 11844e7f640dSJohn Baldwin * back. 11854e7f640dSJohn Baldwin */ 11864e7f640dSJohn Baldwin if (!(x & SX_LOCK_SHARED_WAITERS)) { 118793118b62SMateusz Guzik if (!atomic_fcmpset_ptr(&sx->sx_lock, &x, 118893118b62SMateusz Guzik x | SX_LOCK_SHARED_WAITERS)) 118993118b62SMateusz Guzik goto retry_sleepq; 11904e7f640dSJohn Baldwin if (LOCK_LOG_TEST(&sx->lock_object, 0)) 11914e7f640dSJohn Baldwin CTR2(KTR_LOCK, "%s: %p set shared waiters flag", 11924e7f640dSJohn Baldwin __func__, sx); 11934e7f640dSJohn Baldwin } 11944e7f640dSJohn Baldwin 11954e7f640dSJohn Baldwin /* 11964e7f640dSJohn Baldwin * Since we have been unable to acquire the shared lock, 11974e7f640dSJohn Baldwin * we have to sleep. 11984e7f640dSJohn Baldwin */ 11994e7f640dSJohn Baldwin if (LOCK_LOG_TEST(&sx->lock_object, 0)) 12004e7f640dSJohn Baldwin CTR2(KTR_LOCK, "%s: %p blocking on sleep queue", 12014e7f640dSJohn Baldwin __func__, sx); 12024e7f640dSJohn Baldwin 1203a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 1204e2b25737SMark Johnston sleep_time -= lockstat_nsecs(&sx->lock_object); 1205a5aedd68SStacey Son #endif 12064e7f640dSJohn Baldwin sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name, 1207f9819486SAttilio Rao SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ? 1208f9819486SAttilio Rao SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE); 1209f9819486SAttilio Rao if (!(opts & SX_INTERRUPTIBLE)) 1210c5aa6b58SJeff Roberson sleepq_wait(&sx->lock_object, 0); 1211f9819486SAttilio Rao else 1212c5aa6b58SJeff Roberson error = sleepq_wait_sig(&sx->lock_object, 0); 1213a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 1214e2b25737SMark Johnston sleep_time += lockstat_nsecs(&sx->lock_object); 1215a5aedd68SStacey Son sleep_cnt++; 1216a5aedd68SStacey Son #endif 1217f9819486SAttilio Rao if (error) { 1218f9819486SAttilio Rao if (LOCK_LOG_TEST(&sx->lock_object, 0)) 1219f9819486SAttilio Rao CTR2(KTR_LOCK, 1220f9819486SAttilio Rao "%s: interruptible sleep by %p suspended by signal", 1221f9819486SAttilio Rao __func__, sx); 1222f9819486SAttilio Rao break; 1223f9819486SAttilio Rao } 12244e7f640dSJohn Baldwin if (LOCK_LOG_TEST(&sx->lock_object, 0)) 12254e7f640dSJohn Baldwin CTR2(KTR_LOCK, "%s: %p resuming from sleep queue", 12264e7f640dSJohn Baldwin __func__, sx); 1227c5f61e6fSMateusz Guzik x = SX_READ_VALUE(sx); 12284e7f640dSJohn Baldwin } 1229e41d6166SMateusz Guzik #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) 1230e41d6166SMateusz Guzik if (__predict_true(!extra_work)) 1231e41d6166SMateusz Guzik return (error); 1232e41d6166SMateusz Guzik #endif 1233076dd8ebSAndriy Gapon #ifdef KDTRACE_HOOKS 1234e2b25737SMark Johnston all_time += lockstat_nsecs(&sx->lock_object); 1235076dd8ebSAndriy Gapon if (sleep_time) 123632cd0147SMark Johnston LOCKSTAT_RECORD4(sx__block, sx, sleep_time, 1237076dd8ebSAndriy Gapon LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0, 1238076dd8ebSAndriy Gapon (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); 12391ada9041SMateusz Guzik if (lda.spin_cnt > sleep_cnt) 124032cd0147SMark Johnston LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time, 1241076dd8ebSAndriy Gapon LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0, 1242076dd8ebSAndriy Gapon (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); 124309bdec20SMateusz Guzik out_lockstat: 1244076dd8ebSAndriy Gapon #endif 12453ae56ce9SMateusz Guzik if (error == 0) { 1246de2c95ccSMark Johnston LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, 1247de2c95ccSMark Johnston contested, waittime, file, line, LOCKSTAT_READER); 12483ae56ce9SMateusz Guzik } 12494e7f640dSJohn Baldwin GIANT_RESTORE(); 1250f9819486SAttilio Rao return (error); 12514e7f640dSJohn Baldwin } 12524e7f640dSJohn Baldwin 1253834f70f3SMateusz Guzik int 1254013c0b49SMateusz Guzik _sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF) 12554e7f640dSJohn Baldwin { 12562466d12bSMateusz Guzik struct thread *td; 12574e7f640dSJohn Baldwin uintptr_t x; 1258834f70f3SMateusz Guzik int error; 12594e7f640dSJohn Baldwin 1260704cb42fSMark Johnston KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() || 1261704cb42fSMark Johnston !TD_IS_IDLETHREAD(curthread), 1262834f70f3SMateusz Guzik ("sx_slock() by idle thread %p on sx %s @ %s:%d", 1263834f70f3SMateusz Guzik curthread, sx->lock_object.lo_name, file, line)); 12643ae56ce9SMateusz Guzik KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, 1265834f70f3SMateusz Guzik ("sx_slock() of destroyed sx @ %s:%d", file, line)); 1266834f70f3SMateusz Guzik WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL); 1267834f70f3SMateusz Guzik 1268834f70f3SMateusz Guzik error = 0; 12692466d12bSMateusz Guzik td = curthread; 1270c5f61e6fSMateusz Guzik x = SX_READ_VALUE(sx); 1271e4ccf57fSMateusz Guzik if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) || 12722466d12bSMateusz Guzik !__sx_slock_try(sx, td, &x, true LOCK_FILE_LINE_ARG))) 1273013c0b49SMateusz Guzik error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG); 1274e4ccf57fSMateusz Guzik else 1275e4ccf57fSMateusz Guzik lock_profile_obtain_lock_success(&sx->lock_object, 0, 0, 1276e4ccf57fSMateusz Guzik file, line); 1277834f70f3SMateusz Guzik if (error == 0) { 1278834f70f3SMateusz Guzik LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line); 1279834f70f3SMateusz Guzik WITNESS_LOCK(&sx->lock_object, 0, file, line); 1280834f70f3SMateusz Guzik TD_LOCKS_INC(curthread); 1281834f70f3SMateusz Guzik } 1282834f70f3SMateusz Guzik return (error); 1283834f70f3SMateusz Guzik } 1284834f70f3SMateusz Guzik 1285013c0b49SMateusz Guzik int 1286013c0b49SMateusz Guzik _sx_slock(struct sx *sx, int opts, const char *file, int line) 1287013c0b49SMateusz Guzik { 1288013c0b49SMateusz Guzik 1289013c0b49SMateusz Guzik return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG)); 1290013c0b49SMateusz Guzik } 1291013c0b49SMateusz Guzik 1292834f70f3SMateusz Guzik static bool __always_inline 12932466d12bSMateusz Guzik _sx_sunlock_try(struct sx *sx, struct thread *td, uintptr_t *xp) 1294834f70f3SMateusz Guzik { 1295834f70f3SMateusz Guzik 12964e7f640dSJohn Baldwin for (;;) { 12972466d12bSMateusz Guzik if (SX_SHARERS(*xp) > 1 || !(*xp & SX_LOCK_WAITERS)) { 1298834f70f3SMateusz Guzik if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp, 1299834f70f3SMateusz Guzik *xp - SX_ONE_SHARER)) { 13004e7f640dSJohn Baldwin if (LOCK_LOG_TEST(&sx->lock_object, 0)) 13014e7f640dSJohn Baldwin CTR4(KTR_LOCK, 13024e7f640dSJohn Baldwin "%s: %p succeeded %p -> %p", 1303834f70f3SMateusz Guzik __func__, sx, (void *)*xp, 1304834f70f3SMateusz Guzik (void *)(*xp - SX_ONE_SHARER)); 13052466d12bSMateusz Guzik td->td_sx_slocks--; 1306834f70f3SMateusz Guzik return (true); 13074e7f640dSJohn Baldwin } 13084e7f640dSJohn Baldwin continue; 13094e7f640dSJohn Baldwin } 1310834f70f3SMateusz Guzik break; 1311834f70f3SMateusz Guzik } 1312834f70f3SMateusz Guzik return (false); 1313834f70f3SMateusz Guzik } 1314834f70f3SMateusz Guzik 1315834f70f3SMateusz Guzik static void __noinline 13162466d12bSMateusz Guzik _sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x 13172466d12bSMateusz Guzik LOCK_FILE_LINE_ARG_DEF) 1318834f70f3SMateusz Guzik { 13191b54ffc8SMateusz Guzik int wakeup_swapper = 0; 13202466d12bSMateusz Guzik uintptr_t setx, queue; 1321834f70f3SMateusz Guzik 1322834f70f3SMateusz Guzik if (SCHEDULER_STOPPED()) 1323834f70f3SMateusz Guzik return; 1324834f70f3SMateusz Guzik 13252466d12bSMateusz Guzik if (_sx_sunlock_try(sx, td, &x)) 1326cec17473SMateusz Guzik goto out_lockstat; 13274e7f640dSJohn Baldwin 13284e7f640dSJohn Baldwin sleepq_lock(&sx->lock_object); 1329cec17473SMateusz Guzik x = SX_READ_VALUE(sx); 1330cec17473SMateusz Guzik for (;;) { 13312466d12bSMateusz Guzik if (_sx_sunlock_try(sx, td, &x)) 13321b54ffc8SMateusz Guzik break; 13331b54ffc8SMateusz Guzik 13344e7f640dSJohn Baldwin /* 13354e7f640dSJohn Baldwin * Wake up semantic here is quite simple: 13364e7f640dSJohn Baldwin * Just wake up all the exclusive waiters. 13374e7f640dSJohn Baldwin * Note that the state of the lock could have changed, 13384e7f640dSJohn Baldwin * so if it fails loop back and retry. 13394e7f640dSJohn Baldwin */ 13402466d12bSMateusz Guzik setx = SX_LOCK_UNLOCKED; 13412466d12bSMateusz Guzik queue = SQ_SHARED_QUEUE; 13422466d12bSMateusz Guzik if (x & SX_LOCK_EXCLUSIVE_WAITERS) { 13432466d12bSMateusz Guzik setx |= (x & SX_LOCK_SHARED_WAITERS); 13442466d12bSMateusz Guzik queue = SQ_EXCLUSIVE_QUEUE; 13452466d12bSMateusz Guzik } 13462466d12bSMateusz Guzik setx |= (x & SX_LOCK_WRITE_SPINNER); 1347cec17473SMateusz Guzik if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx)) 13484e7f640dSJohn Baldwin continue; 13494e7f640dSJohn Baldwin if (LOCK_LOG_TEST(&sx->lock_object, 0)) 13504e7f640dSJohn Baldwin CTR2(KTR_LOCK, "%s: %p waking up all thread on" 13514e7f640dSJohn Baldwin "exclusive queue", __func__, sx); 1352da7bbd2cSJohn Baldwin wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 13532466d12bSMateusz Guzik 0, queue); 13542466d12bSMateusz Guzik td->td_sx_slocks--; 1355cec17473SMateusz Guzik break; 1356cec17473SMateusz Guzik } 1357c5aa6b58SJeff Roberson sleepq_release(&sx->lock_object); 1358da7bbd2cSJohn Baldwin if (wakeup_swapper) 1359da7bbd2cSJohn Baldwin kick_proc0(); 1360cec17473SMateusz Guzik out_lockstat: 1361dbe4541dSMark Johnston LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER); 1362834f70f3SMateusz Guzik } 1363834f70f3SMateusz Guzik 1364834f70f3SMateusz Guzik void 1365013c0b49SMateusz Guzik _sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF) 1366834f70f3SMateusz Guzik { 13672466d12bSMateusz Guzik struct thread *td; 1368834f70f3SMateusz Guzik uintptr_t x; 1369834f70f3SMateusz Guzik 1370834f70f3SMateusz Guzik KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, 1371834f70f3SMateusz Guzik ("sx_sunlock() of destroyed sx @ %s:%d", file, line)); 1372834f70f3SMateusz Guzik _sx_assert(sx, SA_SLOCKED, file, line); 1373834f70f3SMateusz Guzik WITNESS_UNLOCK(&sx->lock_object, 0, file, line); 1374834f70f3SMateusz Guzik LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line); 1375834f70f3SMateusz Guzik 13762466d12bSMateusz Guzik td = curthread; 1377834f70f3SMateusz Guzik x = SX_READ_VALUE(sx); 1378e4ccf57fSMateusz Guzik if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) || 13792466d12bSMateusz Guzik !_sx_sunlock_try(sx, td, &x))) 13802466d12bSMateusz Guzik _sx_sunlock_hard(sx, td, x LOCK_FILE_LINE_ARG); 1381e4ccf57fSMateusz Guzik else 1382e4ccf57fSMateusz Guzik lock_profile_release_lock(&sx->lock_object); 1383834f70f3SMateusz Guzik 13843ae56ce9SMateusz Guzik TD_LOCKS_DEC(curthread); 1385d55229b7SJason Evans } 13864e5e677bSJohn Baldwin 1387013c0b49SMateusz Guzik void 1388013c0b49SMateusz Guzik _sx_sunlock(struct sx *sx, const char *file, int line) 1389013c0b49SMateusz Guzik { 1390013c0b49SMateusz Guzik 1391013c0b49SMateusz Guzik _sx_sunlock_int(sx LOCK_FILE_LINE_ARG); 1392013c0b49SMateusz Guzik } 1393013c0b49SMateusz Guzik 13944e5e677bSJohn Baldwin #ifdef INVARIANT_SUPPORT 1395781a35dfSJohn Baldwin #ifndef INVARIANTS 1396781a35dfSJohn Baldwin #undef _sx_assert 1397781a35dfSJohn Baldwin #endif 1398781a35dfSJohn Baldwin 13994e5e677bSJohn Baldwin /* 14004e5e677bSJohn Baldwin * In the non-WITNESS case, sx_assert() can only detect that at least 14014e5e677bSJohn Baldwin * *some* thread owns an slock, but it cannot guarantee that *this* 14024e5e677bSJohn Baldwin * thread owns an slock. 14034e5e677bSJohn Baldwin */ 14044e5e677bSJohn Baldwin void 1405d576deedSPawel Jakub Dawidek _sx_assert(const struct sx *sx, int what, const char *file, int line) 14064e5e677bSJohn Baldwin { 14074e7f640dSJohn Baldwin #ifndef WITNESS 14084e7f640dSJohn Baldwin int slocked = 0; 14094e7f640dSJohn Baldwin #endif 14104e5e677bSJohn Baldwin 1411d54474e6SEric van Gyzen if (SCHEDULER_STOPPED()) 141203129ba9SJohn Baldwin return; 14134e5e677bSJohn Baldwin switch (what) { 14147ec137e5SJohn Baldwin case SA_SLOCKED: 14157ec137e5SJohn Baldwin case SA_SLOCKED | SA_NOTRECURSED: 14167ec137e5SJohn Baldwin case SA_SLOCKED | SA_RECURSED: 14174e7f640dSJohn Baldwin #ifndef WITNESS 14184e7f640dSJohn Baldwin slocked = 1; 14194e7f640dSJohn Baldwin /* FALLTHROUGH */ 14204e7f640dSJohn Baldwin #endif 14217ec137e5SJohn Baldwin case SA_LOCKED: 14227ec137e5SJohn Baldwin case SA_LOCKED | SA_NOTRECURSED: 14237ec137e5SJohn Baldwin case SA_LOCKED | SA_RECURSED: 14244e5e677bSJohn Baldwin #ifdef WITNESS 1425aa89d8cdSJohn Baldwin witness_assert(&sx->lock_object, what, file, line); 14264e5e677bSJohn Baldwin #else 14274e7f640dSJohn Baldwin /* 14284e7f640dSJohn Baldwin * If some other thread has an exclusive lock or we 14294e7f640dSJohn Baldwin * have one and are asserting a shared lock, fail. 14304e7f640dSJohn Baldwin * Also, if no one has a lock at all, fail. 14314e7f640dSJohn Baldwin */ 14324e7f640dSJohn Baldwin if (sx->sx_lock == SX_LOCK_UNLOCKED || 14334e7f640dSJohn Baldwin (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked || 14344e7f640dSJohn Baldwin sx_xholder(sx) != curthread))) 143503129ba9SJohn Baldwin panic("Lock %s not %slocked @ %s:%d\n", 14364e7f640dSJohn Baldwin sx->lock_object.lo_name, slocked ? "share " : "", 14374e7f640dSJohn Baldwin file, line); 14384e7f640dSJohn Baldwin 14394e7f640dSJohn Baldwin if (!(sx->sx_lock & SX_LOCK_SHARED)) { 14404e7f640dSJohn Baldwin if (sx_recursed(sx)) { 14417ec137e5SJohn Baldwin if (what & SA_NOTRECURSED) 14424e7f640dSJohn Baldwin panic("Lock %s recursed @ %s:%d\n", 14434e7f640dSJohn Baldwin sx->lock_object.lo_name, file, 14444e7f640dSJohn Baldwin line); 14457ec137e5SJohn Baldwin } else if (what & SA_RECURSED) 14464e7f640dSJohn Baldwin panic("Lock %s not recursed @ %s:%d\n", 14474e7f640dSJohn Baldwin sx->lock_object.lo_name, file, line); 14484e7f640dSJohn Baldwin } 14494e5e677bSJohn Baldwin #endif 14504e5e677bSJohn Baldwin break; 14517ec137e5SJohn Baldwin case SA_XLOCKED: 14527ec137e5SJohn Baldwin case SA_XLOCKED | SA_NOTRECURSED: 14537ec137e5SJohn Baldwin case SA_XLOCKED | SA_RECURSED: 14544e7f640dSJohn Baldwin if (sx_xholder(sx) != curthread) 145503129ba9SJohn Baldwin panic("Lock %s not exclusively locked @ %s:%d\n", 1456aa89d8cdSJohn Baldwin sx->lock_object.lo_name, file, line); 14574e7f640dSJohn Baldwin if (sx_recursed(sx)) { 14587ec137e5SJohn Baldwin if (what & SA_NOTRECURSED) 14594e7f640dSJohn Baldwin panic("Lock %s recursed @ %s:%d\n", 14604e7f640dSJohn Baldwin sx->lock_object.lo_name, file, line); 14617ec137e5SJohn Baldwin } else if (what & SA_RECURSED) 14624e7f640dSJohn Baldwin panic("Lock %s not recursed @ %s:%d\n", 14634e7f640dSJohn Baldwin sx->lock_object.lo_name, file, line); 14644e5e677bSJohn Baldwin break; 14657ec137e5SJohn Baldwin case SA_UNLOCKED: 146619b0efd3SPawel Jakub Dawidek #ifdef WITNESS 1467aa89d8cdSJohn Baldwin witness_assert(&sx->lock_object, what, file, line); 146819b0efd3SPawel Jakub Dawidek #else 1469f6739b1dSPawel Jakub Dawidek /* 14704e7f640dSJohn Baldwin * If we hold an exclusve lock fail. We can't 14714e7f640dSJohn Baldwin * reliably check to see if we hold a shared lock or 14724e7f640dSJohn Baldwin * not. 1473f6739b1dSPawel Jakub Dawidek */ 14744e7f640dSJohn Baldwin if (sx_xholder(sx) == curthread) 147503129ba9SJohn Baldwin panic("Lock %s exclusively locked @ %s:%d\n", 1476aa89d8cdSJohn Baldwin sx->lock_object.lo_name, file, line); 147719b0efd3SPawel Jakub Dawidek #endif 147819b0efd3SPawel Jakub Dawidek break; 14794e5e677bSJohn Baldwin default: 14804e5e677bSJohn Baldwin panic("Unknown sx lock assertion: %d @ %s:%d", what, file, 14814e5e677bSJohn Baldwin line); 14824e5e677bSJohn Baldwin } 14834e5e677bSJohn Baldwin } 14844e5e677bSJohn Baldwin #endif /* INVARIANT_SUPPORT */ 1485d272fe53SJohn Baldwin 1486d272fe53SJohn Baldwin #ifdef DDB 14874e7f640dSJohn Baldwin static void 1488d576deedSPawel Jakub Dawidek db_show_sx(const struct lock_object *lock) 1489d272fe53SJohn Baldwin { 1490d272fe53SJohn Baldwin struct thread *td; 1491d576deedSPawel Jakub Dawidek const struct sx *sx; 1492d272fe53SJohn Baldwin 1493d576deedSPawel Jakub Dawidek sx = (const struct sx *)lock; 1494d272fe53SJohn Baldwin 1495d272fe53SJohn Baldwin db_printf(" state: "); 14964e7f640dSJohn Baldwin if (sx->sx_lock == SX_LOCK_UNLOCKED) 14974e7f640dSJohn Baldwin db_printf("UNLOCKED\n"); 14980026c92cSJohn Baldwin else if (sx->sx_lock == SX_LOCK_DESTROYED) { 14990026c92cSJohn Baldwin db_printf("DESTROYED\n"); 15000026c92cSJohn Baldwin return; 15010026c92cSJohn Baldwin } else if (sx->sx_lock & SX_LOCK_SHARED) 15024e7f640dSJohn Baldwin db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock)); 15034e7f640dSJohn Baldwin else { 15044e7f640dSJohn Baldwin td = sx_xholder(sx); 1505d272fe53SJohn Baldwin db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 1506431f8906SJulian Elischer td->td_tid, td->td_proc->p_pid, td->td_name); 15074e7f640dSJohn Baldwin if (sx_recursed(sx)) 15084e7f640dSJohn Baldwin db_printf(" recursed: %d\n", sx->sx_recurse); 15094e7f640dSJohn Baldwin } 15104e7f640dSJohn Baldwin 15114e7f640dSJohn Baldwin db_printf(" waiters: "); 15124e7f640dSJohn Baldwin switch(sx->sx_lock & 15134e7f640dSJohn Baldwin (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) { 15144e7f640dSJohn Baldwin case SX_LOCK_SHARED_WAITERS: 15154e7f640dSJohn Baldwin db_printf("shared\n"); 15164e7f640dSJohn Baldwin break; 15174e7f640dSJohn Baldwin case SX_LOCK_EXCLUSIVE_WAITERS: 15184e7f640dSJohn Baldwin db_printf("exclusive\n"); 15194e7f640dSJohn Baldwin break; 15204e7f640dSJohn Baldwin case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS: 15214e7f640dSJohn Baldwin db_printf("exclusive and shared\n"); 15224e7f640dSJohn Baldwin break; 15234e7f640dSJohn Baldwin default: 15244e7f640dSJohn Baldwin db_printf("none\n"); 15254e7f640dSJohn Baldwin } 1526d272fe53SJohn Baldwin } 1527462a7addSJohn Baldwin 1528462a7addSJohn Baldwin /* 1529462a7addSJohn Baldwin * Check to see if a thread that is blocked on a sleep queue is actually 1530462a7addSJohn Baldwin * blocked on an sx lock. If so, output some details and return true. 1531462a7addSJohn Baldwin * If the lock has an exclusive owner, return that in *ownerp. 1532462a7addSJohn Baldwin */ 1533462a7addSJohn Baldwin int 1534462a7addSJohn Baldwin sx_chain(struct thread *td, struct thread **ownerp) 1535462a7addSJohn Baldwin { 1536fea73412SConrad Meyer const struct sx *sx; 1537462a7addSJohn Baldwin 1538462a7addSJohn Baldwin /* 15394e7f640dSJohn Baldwin * Check to see if this thread is blocked on an sx lock. 15404e7f640dSJohn Baldwin * First, we check the lock class. If that is ok, then we 15414e7f640dSJohn Baldwin * compare the lock name against the wait message. 1542462a7addSJohn Baldwin */ 15434e7f640dSJohn Baldwin sx = td->td_wchan; 15444e7f640dSJohn Baldwin if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx || 15454e7f640dSJohn Baldwin sx->lock_object.lo_name != td->td_wmesg) 1546462a7addSJohn Baldwin return (0); 1547462a7addSJohn Baldwin 1548462a7addSJohn Baldwin /* We think we have an sx lock, so output some details. */ 1549462a7addSJohn Baldwin db_printf("blocked on sx \"%s\" ", td->td_wmesg); 15504e7f640dSJohn Baldwin *ownerp = sx_xholder(sx); 15514e7f640dSJohn Baldwin if (sx->sx_lock & SX_LOCK_SHARED) 15524e7f640dSJohn Baldwin db_printf("SLOCK (count %ju)\n", 15534e7f640dSJohn Baldwin (uintmax_t)SX_SHARERS(sx->sx_lock)); 15544e7f640dSJohn Baldwin else 1555462a7addSJohn Baldwin db_printf("XLOCK\n"); 1556462a7addSJohn Baldwin return (1); 1557462a7addSJohn Baldwin } 1558d272fe53SJohn Baldwin #endif 1559