/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Return the mutex address when the lock cookie address is provided.
 * This assumes that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
static int	trylock_mtx(struct lock_object *lock, uintptr_t how);
static int	trylock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_trylock = trylock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_trylock = trylock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

#ifdef ADAPTIVE_MUTEXES
#ifdef MUTEX_CUSTOM_BACKOFF
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "mtx debugging");

static struct lock_delay_config __read_frequently mtx_delay;

SYSCTL_U16(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
    0, "");
SYSCTL_U16(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
#else
#define	mtx_delay	locks_delay
#endif
#endif

#ifdef MUTEX_SPIN_CUSTOM_BACKOFF
static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin,
    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "mtx spin debugging");

static struct lock_delay_config __read_frequently mtx_spin_delay;

SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
    &mtx_spin_delay.base, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
    &mtx_spin_delay.max, 0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);
#else
#define	mtx_spin_delay	locks_delay
#endif

/*
 * System-wide mutexes
 */
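/*
 * blocked_lock is a dummy lock used as a thread's td_lock while that
 * thread is being moved between run queues or turnstiles.  It is set to
 * a permanently owned value in mutex_init() below, so anyone trying to
 * lock the thread spins until its real lock has been restored.
 */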
struct mtx blocked_lock;
struct mtx __exclusive_cache_line Giant;

static void _mtx_lock_indefinite_check(struct mtx *, struct lock_delay_arg *);

static void
assert_mtx(const struct lock_object *lock, int what)
{

	/*
	 * Treat LA_LOCKED as if LA_XLOCKED was asserted.
	 *
	 * Some callers of lc_assert use LA_LOCKED to indicate that either
	 * a shared lock or a write lock is held, while other callers use
	 * the stricter LA_XLOCKED (used as MA_OWNED).
	 *
	 * Mutexes are the only lock class that cannot be shared; as a
	 * result, we can reasonably assume that the caller really intends
	 * LA_XLOCKED when asserting LA_LOCKED on a mutex object.
	 */
	if (what & LA_LOCKED) {
		what &= ~LA_LOCKED;
		what |= LA_XLOCKED;
	}
	mtx_assert((const struct mtx *)lock, what);
}

static void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}

static void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	mtx_lock_spin((struct mtx *)lock);
}

static int
trylock_mtx(struct lock_object *lock, uintptr_t how)
{

	return (mtx_trylock((struct mtx *)lock));
}

static int
trylock_spin(struct lock_object *lock, uintptr_t how)
{

	return (mtx_trylock_spin((struct mtx *)lock));
}

static uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

static uintptr_t
unlock_spin(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock_spin(m);
	return (0);
}

#ifdef KDTRACE_HOOKS
static int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m;
	uintptr_t x;

	m = (const struct mtx *)lock;
	x = m->mtx_lock;
	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
	return (*owner != NULL);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_sleep(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
		    m, 0, 0, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

#ifdef LOCK_PROFILING
	__mtx_unlock_sleep(c, (uintptr_t)curthread, opts, file, line);
#else
	__mtx_unlock(m, curthread, opts, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}

void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;
#ifdef SMP
	uintptr_t tid, v;
#endif

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
		    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
#ifdef SMP
	spinlock_enter();
	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_spin(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire,
		    m, 0, 0, file, line);
#else
	__mtx_lock_spin(m, curthread, opts, file, line);
#endif
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	rval = 1;
	recursed = false;
	v = MTX_UNOWNED;
	for (;;) {
		if (_mtx_obtain_lock_fetch(m, &v, tid))
			break;
		if (v == MTX_UNOWNED)
			continue;
		if (v == tid &&
		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0)) {
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}

int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);
	return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG));
}

/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
#if LOCK_DEBUG > 0
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file,
    int line)
#else
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct thread *td;
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;
	struct thread *owner;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 0;
#endif

	td = curthread;
	tid = (uintptr_t)td;
	m = mtxlock2mtx(c);

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
		while (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				goto out_lockstat;
		}
		doing_lockprof = 1;
		all_time -= lockstat_nsecs(&m->lock_object);
	}
#endif
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#endif

	if (SCHEDULER_STOPPED())
		return;

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(lv_mtx_owner(v) == td)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
		    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
#if LOCK_DEBUG > 0
		opts &= ~MTX_RECURSE;
#endif
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
#if LOCK_DEBUG > 0
	opts &= ~MTX_RECURSE;
#endif

#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init_noadapt(&lda);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, false,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	THREAD_CONTENDS_ON_LOCK(&m->lock_object);

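	/*
	 * Loop until the lock is acquired: spin while the owner is running
	 * on another CPU (if adaptive mutexes are enabled), otherwise block
	 * on the lock's turnstile.
	 */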
	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&m->lock_object, 0))
				CTR3(KTR_LOCK,
				    "%s: spinning on %p held by %p",
				    __func__, m, owner);
			KTR_STATE1(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "spinning", "lockname:\"%s\"",
			    m->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = MTX_READ_VALUE(m);
				owner = lv_mtx_owner(v);
			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "running");
			continue;
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = MTX_READ_VALUE(m);
retry_turnstile:

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) {
			goto retry_turnstile;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
#ifndef ADAPTIVE_MUTEXES
		owner = mtx_owner(m);
#endif
		MPASS(owner == mtx_owner(m));
		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
		v = MTX_READ_VALUE(m);
	}
	THREAD_CONTENTION_DONE(&m->lock_object);
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
out_lockstat:
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
}

#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
#if LOCK_DEBUG > 0
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct lock_delay_arg lda;
	uintptr_t tid;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 0;
#endif

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
		while (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				goto out_lockstat;
		}
		doing_lockprof = 1;
		spin_time -= lockstat_nsecs(&m->lock_object);
	}
#endif
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#endif

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v == tid)) {
		m->mtx_recurse++;
		return;
	}

	if (SCHEDULER_STOPPED())
		return;

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, true, &contested,
	    &waittime);

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		do {
			if (__predict_true(lda.spin_cnt < 10000000)) {
				lock_delay(&lda);
			} else {
				_mtx_lock_indefinite_check(m, &lda);
			}
			v = MTX_READ_VALUE(m);
		} while (v != MTX_UNOWNED);
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
out_lockstat:
#endif
	LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
}
#endif /* SMP */

#ifdef INVARIANTS
static void
thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
{

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("thread_lock() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) == 0,
	    ("thread_lock: got a recursive mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object,
	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
}
#else
#define	thread_lock_validate(m, opts, file, line) do { } while (0)
#endif

#ifndef LOCK_PROFILING
#if LOCK_DEBUG > 0
void
_thread_lock(struct thread *td, int opts, const char *file, int line)
#else
void
_thread_lock(struct thread *td)
#endif
{
	struct mtx *m;
	uintptr_t tid;

	tid = (uintptr_t)curthread;

	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire)))
		goto slowpath_noirq;
	spinlock_enter();
	m = td->td_lock;
	thread_lock_validate(m, 0, file, line);
	if (__predict_false(m == &blocked_lock))
		goto slowpath_unlocked;
	if (__predict_false(!_mtx_obtain_lock(m, tid)))
		goto slowpath_unlocked;
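	/*
	 * The thread's lock pointer may have changed while we were
	 * acquiring m (e.g. if the thread was migrated); if it did,
	 * release m and retry via the slow path.
	 */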
	if (__predict_true(m == td->td_lock)) {
		WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
		return;
	}
	_mtx_release_lock_quick(m);
slowpath_unlocked:
	spinlock_exit();
slowpath_noirq:
#if LOCK_DEBUG > 0
	thread_lock_flags_(td, opts, file, line);
#else
	thread_lock_flags_(td, 0, 0, 0);
#endif
}
#endif

void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 1;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
#endif
#ifdef KDTRACE_HOOKS
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	spinlock_enter();

	for (;;) {
retry:
		m = td->td_lock;
		thread_lock_validate(m, opts, file, line);
		v = MTX_READ_VALUE(m);
		for (;;) {
			if (v == MTX_UNOWNED) {
				if (_mtx_obtain_lock_fetch(m, &v, tid))
					break;
				continue;
			}
			MPASS(v != tid);
			lock_profile_obtain_lock_failed(&m->lock_object, true,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			do {
				if (__predict_true(lda.spin_cnt < 10000000)) {
					lock_delay(&lda);
				} else {
					_mtx_lock_indefinite_check(m, &lda);
				}
				if (m != td->td_lock) {
					spinlock_enter();
					goto retry;
				}
				v = MTX_READ_VALUE(m);
			} while (v != MTX_UNOWNED);
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		_mtx_release_lock_quick(m);
	}
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}

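/*
 * Point td_lock at blocked_lock while the thread is being moved.
 * Returns the previous lock, which the caller owns and keeps holding.
 */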
struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	lock = td->td_lock;
	mtx_assert(lock, MA_OWNED);
	td->td_lock = &blocked_lock;

	return (lock);
}

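/*
 * Point td_lock back at a real lock after thread_lock_block().  The
 * release fence pairs with the acquire fence in thread_lock_block_wait()
 * so that all prior updates to the thread's state are visible to waiters.
 */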
void
thread_lock_unblock(struct thread *td, struct mtx *new)
{

	mtx_assert(new, MA_OWNED);
	KASSERT(td->td_lock == &blocked_lock,
	    ("thread %p lock %p not blocked_lock %p",
	    td, td->td_lock, &blocked_lock));
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

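/*
 * Spin until td_lock stops pointing at blocked_lock, i.e. until a
 * concurrent thread_lock_unblock() has published the thread's new lock.
 */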
void
thread_lock_block_wait(struct thread *td)
{

	while (td->td_lock == &blocked_lock)
		cpu_spinwait();

	/* Acquire fence to be certain that all thread state is visible. */
	atomic_thread_fence_acq();
}

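/*
 * Switch td_lock to a new lock that the caller already owns, and
 * release the old lock.
 */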
void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	lock = td->td_lock;
	mtx_assert(lock, MA_OWNED);
	td->td_lock = new;
	mtx_unlock_spin(lock);
}

/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed, contested (i.e. we
 * need to wake up a blocked thread), or a lockstat probe is active.
 */
#if LOCK_DEBUG > 0
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

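	/*
	 * The LOCK_PROFILING path calls here unconditionally with v equal
	 * to our tid; re-read the lock word to learn whether the lock is
	 * recursed or contested.
	 */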
	if (__predict_false(v == tid))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v & MTX_RECURSED)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
	if (v == tid && _mtx_release_lock(m, tid))
		return;

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	_mtx_release_lock_quick(m);
	ts = turnstile_lookup(&m->lock_object);
	if (__predict_false(ts == NULL)) {
		panic("got NULL turnstile on mutex %p v %p", m, (void *)v);
	}
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);

	/*
	 * This turnstile is no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (KERNEL_PANICKED() || dumping || SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' of type contained in
 * `opts' with options contained in `opts' and name `name.'  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (opts & MTX_NEW)
		flags |= LO_NEW;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) {
			lock_profile_release_lock(&m->lock_object, true);
			spinlock_exit();
		} else {
			TD_LOCKS_DEC(curthread);
			lock_profile_release_lock(&m->lock_object, false);
		}

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

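/*
 * Called from the spin loops once a lock has been spun on for a very long
 * time: keep delaying while a debugger is active or the kernel has
 * panicked, and otherwise panic once the owner has held the lock for an
 * unreasonable number of iterations.
 */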
static void __noinline
_mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
{
	struct thread *td;

	ldap->spin_cnt++;
	if (ldap->spin_cnt < 60000000 || kdb_active || KERNEL_PANICKED())
		cpu_lock_delay();
	else {
		td = mtx_owner(m);

		/* If the mutex is unlocked, try again. */
		if (td == NULL)
			return;

		printf("spin lock %p (%s) held by %p (tid %d) too long\n",
		    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
		witness_display_spinlock(&m->lock_object, td, printf);
#endif
		panic("spin lock held too long");
	}
	cpu_spinwait();
}

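/*
 * Spin until the given spin mutex is released, without acquiring it.
 */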
void
mtx_spin_wait_unlocked(struct mtx *m)
{
	struct lock_delay_arg lda;

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("%s() of destroyed mutex %p", __func__, m));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("%s() of sleep mutex %p (%s)", __func__, m,
	    m->lock_object.lo_name));
	KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)",
	    __func__, m, m->lock_object.lo_name));

	lda.spin_cnt = 0;

	while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) {
		if (__predict_true(lda.spin_cnt < 10000000)) {
			cpu_spinwait();
			lda.spin_cnt++;
		} else {
			_mtx_lock_indefinite_check(m, &lda);
		}
	}
}

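/*
 * Wait until the given sleep mutex is released, without acquiring it for
 * the caller.  If the owner is not running, briefly lock and unlock the
 * mutex so that we sleep instead of spinning while it is held.
 */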
void
mtx_wait_unlocked(struct mtx *m)
{
	struct thread *owner;
	uintptr_t v;

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("%s() of destroyed mutex %p", __func__, m));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("%s() not a sleep mutex %p (%s)", __func__, m,
	    m->lock_object.lo_name));
	KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)",
	    __func__, m, m->lock_object.lo_name));

	for (;;) {
		v = atomic_load_acq_ptr(&m->mtx_lock);
		if (v == MTX_UNOWNED) {
			break;
		}
		owner = lv_mtx_owner(v);
		if (!TD_IS_RUNNING(owner)) {
			mtx_lock(m);
			mtx_unlock(m);
			break;
		}
		cpu_spinwait();
	}
}

#ifdef DDB
static void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif