/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *  and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)  ((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)    (mtx_unowned((m)) ? NULL \
        : (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
        "sleep mutex",
        LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
        "spin mutex",
        LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
        const char      *name;
        const char      *file;
        int             line;
        uintmax_t       cnt_max;
        uintmax_t       cnt_tot;
        uintmax_t       cnt_cur;
        uintmax_t       cnt_contest_holding;
        uintmax_t       cnt_contest_locking;
        struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define NUM_MPROF_BUFFERS       1000
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define MPROF_HASH_SIZE         1009
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define MPROF_SBUF_SIZE         256 * 400

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
        struct timespec tv;

        nanotime(&tv);
        return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
        struct sbuf *sb;
        int error, i;
        static int multiplier = 1;

        if (first_free_mprof_buf == 0)
                return (SYSCTL_OUT(req, "No locking recorded",
                    sizeof("No locking recorded")));

retry_sbufops:
        sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
        sbuf_printf(sb, "%6s %12s %11s %5s %12s %12s %s\n",
            "max", "total", "count", "avg", "cnt_hold", "cnt_lock", "name");
        /*
         * XXX this spinlock seems to be by far the largest perpetrator
         * of spinlock latency (1.6 msec on an Athlon1600 was recorded
         * even before I pessimized it further by moving the average
         * computation here).
         */
        mtx_lock_spin(&mprof_mtx);
        for (i = 0; i < first_free_mprof_buf; ++i) {
                sbuf_printf(sb, "%6ju %12ju %11ju %5ju %12ju %12ju %s:%d (%s)\n",
                    mprof_buf[i].cnt_max / 1000,
                    mprof_buf[i].cnt_tot / 1000,
                    mprof_buf[i].cnt_cur,
                    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
                        mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
                    mprof_buf[i].cnt_contest_holding,
                    mprof_buf[i].cnt_contest_locking,
                    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
                if (sbuf_overflowed(sb)) {
                        mtx_unlock_spin(&mprof_mtx);
                        sbuf_delete(sb);
                        multiplier++;
                        goto retry_sbufops;
                }
        }
        mtx_unlock_spin(&mprof_mtx);
        sbuf_finish(sb);
        error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
        return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");

static int
reset_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
        int error, v;

        if (first_free_mprof_buf == 0)
                return (0);

        v = 0;
        error = sysctl_handle_int(oidp, &v, 0, req);
        if (error)
                return (error);
        if (req->newptr == NULL)
                return (error);
        if (v == 0)
                return (0);

        mtx_lock_spin(&mprof_mtx);
        bzero(mprof_buf, sizeof(*mprof_buf) * first_free_mprof_buf);
        bzero(mprof_hash, sizeof(struct mtx *) * MPROF_HASH_SIZE);
        first_free_mprof_buf = 0;
        mtx_unlock_spin(&mprof_mtx);
        return (0);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_mutex_prof_stats, "I", "Reset mutex profiling statistics");
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
            ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line);
        _get_sleep_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
        /* don't reset the timer when/if recursing */
        if (m->mtx_acqtime == 0) {
                m->mtx_filename = file;
                m->mtx_lineno = line;
                m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
                ++mutex_prof_acquisitions;
        }
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
            ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
        if (m->mtx_acqtime != 0) {
                static const char *unknown = "(unknown)";
                struct mutex_prof *mpp;
                u_int64_t acqtime, now;
                const char *p, *q;
                volatile u_int hash;

                now = nanoseconds();
                acqtime = m->mtx_acqtime;
                m->mtx_acqtime = 0;
                if (now <= acqtime)
                        goto out;
                for (p = m->mtx_filename;
                    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
                        /* nothing */ ;
                if (p == NULL || *p == '\0')
                        p = unknown;
                for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
                        hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
                mtx_lock_spin(&mprof_mtx);
                for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
                        if (mpp->line == m->mtx_lineno &&
                            strcmp(mpp->file, p) == 0)
                                break;
                if (mpp == NULL) {
                        /* Just exit if we cannot get a trace buffer */
                        if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
                                ++mutex_prof_rejected;
                                goto unlock;
                        }
                        mpp = &mprof_buf[first_free_mprof_buf++];
                        mpp->name = mtx_name(m);
                        mpp->file = p;
                        mpp->line = m->mtx_lineno;
                        mpp->next = mprof_hash[hash];
                        if (mprof_hash[hash] != NULL)
                                ++mutex_prof_collisions;
                        mprof_hash[hash] = mpp;
                        ++mutex_prof_records;
                }
                /*
                 * Record if the mutex has been held longer now than ever
                 * before.
                 */
                if (now - acqtime > mpp->cnt_max)
                        mpp->cnt_max = now - acqtime;
                mpp->cnt_tot += now - acqtime;
                mpp->cnt_cur++;
                /*
                 * There's a small race, really we should cmpxchg
                 * 0 with the current value, but that would bill
                 * the contention to the wrong lock instance if
                 * it followed this also.
                 */
                mpp->cnt_contest_holding += m->mtx_contest_holding;
                m->mtx_contest_holding = 0;
                mpp->cnt_contest_locking += m->mtx_contest_locking;
                m->mtx_contest_locking = 0;
unlock:
                mtx_unlock_spin(&mprof_mtx);
        }
out:
#endif
        _rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
            ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
        WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
        _get_spin_lock(m, curthread, opts, file, line);
#else
        critical_enter();
#endif
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
            ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
        _rel_spin_lock(m);
#else
        critical_exit();
#endif
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
        int rval;

        MPASS(curthread != NULL);

        if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                rval = 1;
        } else
                rval = _obtain_lock(m, curthread);

        LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
        if (rval)
                WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);

        return (rval);
}

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
        struct turnstile *ts;
        struct thread *td = curthread;
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
        struct thread *owner;
#endif
        uintptr_t v;
#ifdef KTR
        int cont_logged = 0;
#endif
#ifdef MUTEX_PROFILING
        int contested;
#endif

        if (mtx_owned(m)) {
                KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
                    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->mtx_object.lo_name, file, line));
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
                return;
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR4(KTR_LOCK,
                    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

#ifdef MUTEX_PROFILING
        contested = 0;
#endif
        while (!_obtain_lock(m, td)) {
#ifdef MUTEX_PROFILING
                contested = 1;
                atomic_add_int(&m->mtx_contest_holding, 1);
#endif
                ts = turnstile_lookup(&m->mtx_object);
                v = m->mtx_lock;

                /*
                 * Check if the lock has been released while spinning for
                 * the turnstile chain lock.
                 */
                if (v == MTX_UNOWNED) {
                        turnstile_release(&m->mtx_object);
#ifdef __i386__
                        ia32_pause();
#endif
                        continue;
                }

                /*
                 * The mutex was marked contested on release. This means that
                 * there are other threads blocked on it.  Grab ownership of
                 * it and propagate its priority to the current thread if
                 * necessary.
                 */
                if (v == MTX_CONTESTED) {
                        MPASS(ts != NULL);
                        m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
                        turnstile_claim(ts);
                        break;
                }

                /*
                 * If the mutex isn't already contested and a failure occurs
                 * setting the contested bit, the mutex was either released
                 * or the state of the MTX_RECURSED bit changed.
                 */
                if ((v & MTX_CONTESTED) == 0 &&
                    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
                    (void *)(v | MTX_CONTESTED))) {
                        turnstile_release(&m->mtx_object);
#ifdef __i386__
                        ia32_pause();
#endif
                        continue;
                }

#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
                /*
                 * If the current owner of the lock is executing on another
                 * CPU, spin instead of blocking.
                 */
                owner = (struct thread *)(v & MTX_FLAGMASK);
                if (m != &Giant && TD_IS_RUNNING(owner)) {
                        turnstile_release(&m->mtx_object);
                        while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
#ifdef __i386__
                                ia32_pause();
#endif
                        }
                        continue;
                }
#endif  /* SMP && ADAPTIVE_MUTEXES */

                /*
                 * We definitely must sleep for this lock.
                 */
                mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
                if (!cont_logged) {
                        CTR6(KTR_CONTENTION,
                            "contention: %p at %s:%d wants %s, taken by %s:%d",
                            td, file, line, m->mtx_object.lo_name,
                            WITNESS_FILE(&m->mtx_object),
                            WITNESS_LINE(&m->mtx_object));
                        cont_logged = 1;
                }
#endif

                /*
                 * Block on the turnstile.
                 */
                turnstile_wait(ts, &m->mtx_object, mtx_owner(m));
        }

#ifdef KTR
        if (cont_logged) {
                CTR4(KTR_CONTENTION,
                    "contention end: %s acquired by %p at %s:%d",
                    m->mtx_object.lo_name, td, file, line);
        }
#endif
#ifdef MUTEX_PROFILING
        if (contested)
                m->mtx_contest_locking++;
        m->mtx_contest_holding = 0;
#endif
        return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
        int i = 0;

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

        for (;;) {
                if (_obtain_lock(m, curthread))
                        break;

                /* Give interrupts a chance while we spin. */
                critical_exit();
                while (m->mtx_lock != MTX_UNOWNED) {
                        if (i++ < 10000000) {
#ifdef __i386__
                                ia32_pause();
#endif
                                continue;
                        }
                        if (i < 60000000)
                                DELAY(1);
#ifdef DDB
                        else if (!db_active) {
#else
                        else {
#endif
                                printf("spin lock %s held by %p for > 5 seconds\n",
                                    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef WITNESS
                                witness_display_spinlock(&m->mtx_object,
                                    mtx_owner(m));
#endif
                                panic("spin lock held too long");
                        }
#ifdef __i386__
                        ia32_pause();
#endif
                }
                critical_enter();
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

        return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
        struct turnstile *ts;
        struct thread *td, *td1;

        if (mtx_recursed(m)) {
                if (--(m->mtx_recurse) == 0)
                        atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
                return;
        }

        ts = turnstile_lookup(&m->mtx_object);
        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
        if (ts == NULL) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
                turnstile_release(&m->mtx_object);
                return;
        }
#else
        MPASS(ts != NULL);
#endif
        /* XXX */
        td1 = turnstile_head(ts);
        if (turnstile_signal(ts)) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
        } else {
                m->mtx_lock = MTX_CONTESTED;
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
                            m);
        }
        turnstile_unpend(ts);

        /*
         * XXX: This is just a hack until preemption is done.  However,
         * once preemption is done we need to either wrap the
         * turnstile_signal() and release of the actual lock in an
         * extra critical section or change the preemption code to
         * always just set a flag and never do instant-preempts.
         */
        td = curthread;
        if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
                return;
        mtx_lock_spin(&sched_lock);
        if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
                if (td->td_ithd != NULL) {
                        struct ithd *it = td->td_ithd;

                        if (it->it_interrupted) {
                                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                                        CTR2(KTR_LOCK,
                                            "_mtx_unlock_sleep: %p interrupted %p",
                                            it, it->it_interrupted);
                                intr_thd_fixup(it);
                        }
                }
#endif
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK,
                            "_mtx_unlock_sleep: %p switching out lock=%p", m,
                            (void *)m->mtx_lock);

                mi_switch(SW_INVOL);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
                            m, (void *)m->mtx_lock);
        }
        mtx_unlock_spin(&sched_lock);

        return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

        if (panicstr != NULL)
                return;
        switch (what) {
        case MA_OWNED:
        case MA_OWNED | MA_RECURSED:
        case MA_OWNED | MA_NOTRECURSED:
                if (!mtx_owned(m))
                        panic("mutex %s not owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                if (mtx_recursed(m)) {
                        if ((what & MA_NOTRECURSED) != 0)
                                panic("mutex %s recursed at %s:%d",
                                    m->mtx_object.lo_name, file, line);
                } else if ((what & MA_RECURSED) != 0) {
                        panic("mutex %s unrecursed at %s:%d",
                            m->mtx_object.lo_name, file, line);
                }
                break;
        case MA_NOTOWNED:
                if (mtx_owned(m))
                        panic("mutex %s owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                break;
        default:
                panic("unknown mtx_assert at %s:%d", file, line);
        }
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void    mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can re-enable this check.
 */
#ifdef notyet
/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
        /*
         * Can't call kernacc() from early init386(), especially when
         * initializing Giant mutex, because some stuff in kernacc()
         * requires Giant itself.
         */
        if (!cold)
                if (!kernacc((caddr_t)m, sizeof(m),
                    VM_PROT_READ | VM_PROT_WRITE))
                        panic("Can't read and write to mutex %p", m);
#endif
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
        struct mtx_args *margs = arg;

        mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m', whose type and options
 * are contained in `opts', with name `name.'  The optional lock type `type'
 * is used as a general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
        struct lock_object *lock;

        MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
            MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
        /* Diagnostic and error correction */
        mtx_validate(m);
#endif

        lock = &m->mtx_object;
        KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
            ("mutex \"%s\" %p already initialized", name, m));
        bzero(m, sizeof(*m));
        if (opts & MTX_SPIN)
                lock->lo_class = &lock_class_mtx_spin;
        else
                lock->lo_class = &lock_class_mtx_sleep;
        lock->lo_name = name;
        lock->lo_type = type != NULL ? type : name;
        if (opts & MTX_QUIET)
                lock->lo_flags = LO_QUIET;
        if (opts & MTX_RECURSE)
                lock->lo_flags |= LO_RECURSABLE;
        if ((opts & MTX_NOWITNESS) == 0)
                lock->lo_flags |= LO_WITNESS;
        if (opts & MTX_DUPOK)
                lock->lo_flags |= LO_DUPOK;

        m->mtx_lock = MTX_UNOWNED;

        LOCK_LOG_INIT(lock, opts);

        WITNESS_INIT(lock);
}

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

        LOCK_LOG_DESTROY(&m->mtx_object, 0);

        if (!mtx_owned(m))
                MPASS(mtx_unowned(m));
        else {
                MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

                /* Tell witness this isn't locked to make it happy. */
                WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
                    __LINE__);
        }

        WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

        /* Set up thread0 so that mutexes work. */
        LIST_INIT(&thread0.td_contested);

        /* Set up turnstiles so that sleep mutexes work. */
        init_turnstiles();

        /*
         * Initialize mutexes.
         */
        mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
        mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
        mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
        mtx_lock(&Giant);
}
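
/*
 * Usage sketch: a typical consumer of this API declares a sleep mutex,
 * initializes it once, and brackets access to the data it protects with
 * mtx_lock()/mtx_unlock(), matching the way mutex_init() above sets up
 * Giant and the process lock.  The structure and field names below are
 * hypothetical and only illustrate the calling pattern.
 *
 *	static struct mtx foo_mtx;
 *	static int foo_count;
 *
 *	static void
 *	foo_setup(void)
 *	{
 *		mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	}
 *
 *	static void
 *	foo_bump(void)
 *	{
 *		mtx_lock(&foo_mtx);
 *		foo_count++;
 *		mtx_unlock(&foo_mtx);
 *	}
 *
 *	static void
 *	foo_teardown(void)
 *	{
 *		mtx_destroy(&foo_mtx);
 *	}
 *
 * A mutex that must exist before any explicit setup code runs can instead be
 * declared with MTX_SYSINIT(), as the mprof_mtx profiling lock above does.
 */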