/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define RMPF_ONQUEUE	1
#define RMPF_SIGNAL	2

/*
 * To support usage of rmlock in CVs and msleep yet another list for the
 * priority tracker would be needed.
 * Using this lock for cv and msleep also does not seem very useful.
 */

static void assert_rm(const struct lock_object *lock, int what);
static void lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int  owner_rm(const struct lock_object *lock, struct thread **owner);
#endif
static int  unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
        .lc_name = "rm",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
        .lc_assert = assert_rm,
#if 0
#ifdef DDB
        .lc_ddb_show = db_show_rwlock,
#endif
#endif
        .lc_lock = lock_rm,
        .lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_rm,
#endif
};

/*
 * The generic lock class callbacks are not supported for rmlocks; they
 * simply panic if they are ever called.
 */
static void
assert_rm(const struct lock_object *lock, int what)
{

        panic("assert_rm called");
}

static void
lock_rm(struct lock_object *lock, int how)
{

        panic("lock_rm called");
}

static int
unlock_rm(struct lock_object *lock)
{

        panic("unlock_rm called");
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{

        panic("owner_rm called");
}
#endif

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static void inline
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
        struct rm_queue *next;

        /* Initialize all tracker pointers */
        tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
        next = pc->pc_rm_queue.rmq_next;
        tracker->rmp_cpuQueue.rmq_next = next;

        /* rmq_prev is not used during forward traversal. */
        next->rmq_prev = &tracker->rmp_cpuQueue;

        /* Update pointer to first element. */
        pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
        struct rm_queue *next, *prev;

        next = tracker->rmp_cpuQueue.rmq_next;
        prev = tracker->rmp_cpuQueue.rmq_prev;

        /* Not used during forward traversal. */
        next->rmq_prev = prev;

        /* Remove from list. */
        prev->rmq_next = next;
}

/*
 * IPI handler, run via smp_rendezvous_cpus() from _rm_wlock(): flag every
 * active tracker for this lock on the local CPU so the writer can wait for
 * the corresponding readers to drain.
 */
static void
rm_cleanIPI(void *arg)
{
        struct pcpu *pc;
        struct rmlock *rm = arg;
        struct rm_priotracker *tracker;
        struct rm_queue *queue;

        pc = pcpu_find(curcpu);

        for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
            queue = queue->rmq_next) {
                tracker = (struct rm_priotracker *)queue;
                if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
                        tracker->rmp_flags = RMPF_ONQUEUE;
                        mtx_lock_spin(&rm_spinlock);
                        LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
                            rmp_qentry);
                        mtx_unlock_spin(&rm_spinlock);
                }
        }
}

CTASSERT((RM_SLEEPABLE & LO_CLASSFLAGS) == RM_SLEEPABLE);

void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
        int liflags;

        liflags = 0;
        if (!(opts & RM_NOWITNESS))
                liflags |= LO_WITNESS;
        if (opts & RM_RECURSE)
                liflags |= LO_RECURSABLE;
        rm->rm_writecpus = all_cpus;
        LIST_INIT(&rm->rm_activeReaders);
        if (opts & RM_SLEEPABLE) {
                liflags |= RM_SLEEPABLE;
                sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_RECURSE);
        } else
                mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
        lock_init(&rm->lock_object, &lock_class_rm, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

        rm_init_flags(rm, name, 0);
}

void
rm_destroy(struct rmlock *rm)
{

        if (rm->lock_object.lo_flags & RM_SLEEPABLE)
                sx_destroy(&rm->rm_lock_sx);
        else
                mtx_destroy(&rm->rm_lock_mtx);
        lock_destroy(&rm->lock_object);
}

int
rm_wowned(const struct rmlock *rm)
{

        if (rm->lock_object.lo_flags & RM_SLEEPABLE)
                return (sx_xlocked(&rm->rm_lock_sx));
        else
                return (mtx_owned(&rm->rm_lock_mtx));
}

void
rm_sysinit(void *arg)
{
        struct rm_args *args = arg;

        rm_init(args->ra_rm, args->ra_desc);
}

void
rm_sysinit_flags(void *arg)
{
        struct rm_args_flags *args = arg;

        rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}

static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
        struct pcpu *pc;
        struct rm_queue *queue;
        struct rm_priotracker *atracker;

        critical_enter();
        pc = pcpu_find(curcpu);

        /* Check if we just need to do a proper critical_exit. */
        if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
                critical_exit();
                return (1);
        }

        /* Remove our tracker from the per-cpu list. */
        rm_tracker_remove(pc, tracker);

        /* Check to see if the IPI granted us the lock after all. */
        if (tracker->rmp_flags) {
                /* Just add back tracker - we hold the lock. */
                rm_tracker_add(pc, tracker);
                critical_exit();
                return (1);
        }

        /*
         * We allow readers to acquire a lock even if a writer is blocked if
         * the lock is recursive and the reader already holds the lock.
         */
        if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
                /*
                 * Just grant the lock if this thread already has a tracker
                 * for this lock on the per-cpu queue.
                 */
                for (queue = pc->pc_rm_queue.rmq_next;
                    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
                        atracker = (struct rm_priotracker *)queue;
                        if ((atracker->rmp_rmlock == rm) &&
                            (atracker->rmp_thread == tracker->rmp_thread)) {
                                mtx_lock_spin(&rm_spinlock);
                                LIST_INSERT_HEAD(&rm->rm_activeReaders,
                                    tracker, rmp_qentry);
                                tracker->rmp_flags = RMPF_ONQUEUE;
                                mtx_unlock_spin(&rm_spinlock);
                                rm_tracker_add(pc, tracker);
                                critical_exit();
                                return (1);
                        }
                }
        }

        sched_unpin();
        critical_exit();

        if (trylock) {
                if (rm->lock_object.lo_flags & RM_SLEEPABLE) {
                        if (!sx_try_xlock(&rm->rm_lock_sx))
                                return (0);
                } else {
                        if (!mtx_trylock(&rm->rm_lock_mtx))
                                return (0);
                }
        } else {
                if (rm->lock_object.lo_flags & RM_SLEEPABLE)
                        sx_xlock(&rm->rm_lock_sx);
                else
                        mtx_lock(&rm->rm_lock_mtx);
        }

        critical_enter();
        pc = pcpu_find(curcpu);
        CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
        rm_tracker_add(pc, tracker);
        sched_pin();
        critical_exit();

        if (rm->lock_object.lo_flags & RM_SLEEPABLE)
                sx_xunlock(&rm->rm_lock_sx);
        else
                mtx_unlock(&rm->rm_lock_mtx);

        return (1);
}

int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
        struct thread *td = curthread;
        struct pcpu *pc;

        if (SCHEDULER_STOPPED())
                return (1);

        tracker->rmp_flags = 0;
        tracker->rmp_thread = td;
        tracker->rmp_rmlock = rm;

        td->td_critnest++;      /* critical_enter(); */

        __compiler_membar();

        pc = cpuid_to_pcpu[td->td_oncpu];       /* pcpu_find(td->td_oncpu); */

        rm_tracker_add(pc, tracker);

        sched_pin();

        __compiler_membar();

        td->td_critnest--;

        /*
         * Fast path to combine two common conditions into a single
         * conditional jump.
         */
        if (0 == (td->td_owepreempt |
            CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
                return (1);

        /* We do not have a read token and need to acquire one. */
        return _rm_rlock_hard(rm, tracker, trylock);
}

static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

        if (td->td_owepreempt) {
                td->td_critnest++;
                critical_exit();
        }

        if (!tracker->rmp_flags)
                return;

        mtx_lock_spin(&rm_spinlock);
        LIST_REMOVE(tracker, rmp_qentry);

        if (tracker->rmp_flags & RMPF_SIGNAL) {
                struct rmlock *rm;
                struct turnstile *ts;

                rm = tracker->rmp_rmlock;

                turnstile_chain_lock(&rm->lock_object);
                mtx_unlock_spin(&rm_spinlock);

                ts = turnstile_lookup(&rm->lock_object);

                turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
                turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
                turnstile_chain_unlock(&rm->lock_object);
        } else
                mtx_unlock_spin(&rm_spinlock);
}

void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
        struct pcpu *pc;
        struct thread *td = tracker->rmp_thread;

        if (SCHEDULER_STOPPED())
                return;

        td->td_critnest++;      /* critical_enter(); */
        pc = cpuid_to_pcpu[td->td_oncpu];       /* pcpu_find(td->td_oncpu); */
        rm_tracker_remove(pc, tracker);
        td->td_critnest--;
        sched_unpin();

        if (0 == (td->td_owepreempt | tracker->rmp_flags))
                return;

        _rm_unlock_hard(td, tracker);
}

void
_rm_wlock(struct rmlock *rm)
{
        struct rm_priotracker *prio;
        struct turnstile *ts;
        cpuset_t readcpus;

        if (SCHEDULER_STOPPED())
                return;

        if (rm->lock_object.lo_flags & RM_SLEEPABLE)
                sx_xlock(&rm->rm_lock_sx);
        else
                mtx_lock(&rm->rm_lock_mtx);

        if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
                /* Get all read tokens back */
                readcpus = all_cpus;
                CPU_NAND(&readcpus, &rm->rm_writecpus);
                rm->rm_writecpus = all_cpus;

                /*
                 * Assumes rm->rm_writecpus update is visible on other CPUs
                 * before rm_cleanIPI is called.
                 */
#ifdef SMP
                smp_rendezvous_cpus(readcpus,
                    smp_no_rendevous_barrier,
                    rm_cleanIPI,
                    smp_no_rendevous_barrier,
                    rm);

#else
                rm_cleanIPI(rm);
#endif

                mtx_lock_spin(&rm_spinlock);
                while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
                        ts = turnstile_trywait(&rm->lock_object);
                        prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
                        mtx_unlock_spin(&rm_spinlock);
                        turnstile_wait(ts, prio->rmp_thread,
                            TS_EXCLUSIVE_QUEUE);
                        mtx_lock_spin(&rm_spinlock);
                }
                mtx_unlock_spin(&rm_spinlock);
        }
}

void
_rm_wunlock(struct rmlock *rm)
{

        if (rm->lock_object.lo_flags & RM_SLEEPABLE)
                sx_xunlock(&rm->rm_lock_sx);
        else
                mtx_unlock(&rm->rm_lock_mtx);
}

#ifdef LOCK_DEBUG

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
            curthread, rm->lock_object.lo_name, file, line));
        WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line, NULL);

        _rm_wlock(rm);

        LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

        if (rm->lock_object.lo_flags & RM_SLEEPABLE)
                WITNESS_LOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
                    file, line);
        else
                WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

        curthread->td_locks++;
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        curthread->td_locks--;
        if (rm->lock_object.lo_flags & RM_SLEEPABLE)
                WITNESS_UNLOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
                    file, line);
        else
                WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
        _rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
            curthread, rm->lock_object.lo_name, file, line));
        if (!trylock && (rm->lock_object.lo_flags & RM_SLEEPABLE))
                WITNESS_CHECKORDER(&rm->rm_lock_sx.lock_object, LOP_NEWORDER,
                    file, line, NULL);
        WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL);

        if (_rm_rlock(rm, tracker, trylock)) {
                LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line);

                WITNESS_LOCK(&rm->lock_object, 0, file, line);

                curthread->td_locks++;

                return (1);
        }

        return (0);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        curthread->td_locks--;
        WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
        LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
        _rm_runlock(rm, tracker);
}

#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

        _rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

        _rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

        return _rm_rlock(rm, tracker, trylock);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        _rm_runlock(rm, tracker);
}

#endif
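
/*
 * Usage sketch (illustrative only, not part of the implementation): a
 * consumer goes through the rm_* macros declared in <sys/rmlock.h>, keeping
 * an rm_priotracker on the stack for the lifetime of each read section.
 * The identifiers foo_lock, foo_data, foo_init, foo_get and foo_set below
 * are hypothetical.
 *
 *	static struct rmlock foo_lock;
 *	static int foo_data;
 *
 *	static void
 *	foo_init(void)
 *	{
 *
 *		rm_init(&foo_lock, "foo");
 *	}
 *
 *	static int
 *	foo_get(void)
 *	{
 *		struct rm_priotracker tracker;
 *		int val;
 *
 *		rm_rlock(&foo_lock, &tracker);
 *		val = foo_data;
 *		rm_runlock(&foo_lock, &tracker);
 *		return (val);
 *	}
 *
 *	static void
 *	foo_set(int val)
 *	{
 *
 *		rm_wlock(&foo_lock);
 *		foo_data = val;
 *		rm_wunlock(&foo_lock);
 *	}
 *
 * Readers only pay for the per-cpu tracker bookkeeping implemented above;
 * a writer pulls the read tokens back with an IPI and may block on a
 * turnstile until the active readers have drained.
 */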