/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define RMPF_ONQUEUE	1
#define RMPF_SIGNAL	2

/*
 * To support usage of rmlock in CVs and msleep, yet another list for the
 * priority tracker would be needed.  Using this lock for cv and msleep also
 * does not seem very useful.
 */

static __inline void
compiler_memory_barrier(void)
{

        __asm __volatile("":::"memory");
}

static void assert_rm(struct lock_object *lock, int what);
static void lock_rm(struct lock_object *lock, int how);
static int unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
        .lc_name = "rm",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
        .lc_assert = assert_rm,
#if 0
#ifdef DDB
        .lc_ddb_show = db_show_rwlock,
#endif
#endif
        .lc_lock = lock_rm,
        .lc_unlock = unlock_rm,
};

static void
assert_rm(struct lock_object *lock, int what)
{

        panic("assert_rm called");
}

static void
lock_rm(struct lock_object *lock, int how)
{

        panic("lock_rm called");
}

static int
unlock_rm(struct lock_object *lock)
{

        panic("unlock_rm called");
}

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);
/*
 * Add or remove a tracker from the per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static void inline
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
        struct rm_queue *next;

        /* Initialize all tracker pointers. */
        tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
        next = pc->pc_rm_queue.rmq_next;
        tracker->rmp_cpuQueue.rmq_next = next;

        /* rmq_prev is not used during forward traversal. */
        next->rmq_prev = &tracker->rmp_cpuQueue;

        /* Update pointer to first element. */
        pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
        struct rm_queue *next, *prev;

        next = tracker->rmp_cpuQueue.rmq_next;
        prev = tracker->rmp_cpuQueue.rmq_prev;

        /* Not used during forward traversal. */
        next->rmq_prev = prev;

        /* Remove from list. */
        prev->rmq_next = next;
}

static void
rm_cleanIPI(void *arg)
{
        struct pcpu *pc;
        struct rmlock *rm = arg;
        struct rm_priotracker *tracker;
        struct rm_queue *queue;

        pc = pcpu_find(curcpu);

        for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
            queue = queue->rmq_next) {
                tracker = (struct rm_priotracker *)queue;
                if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
                        tracker->rmp_flags = RMPF_ONQUEUE;
                        mtx_lock_spin(&rm_spinlock);
                        LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
                            rmp_qentry);
                        mtx_unlock_spin(&rm_spinlock);
                }
        }
}

void
rm_init(struct rmlock *rm, const char *name, int opts)
{

        rm->rm_noreadtoken = 1;
        LIST_INIT(&rm->rm_activeReaders);
        mtx_init(&rm->rm_lock, name, "RM_MTX", MTX_NOWITNESS);
        lock_init(&rm->lock_object, &lock_class_rm, name, NULL,
            (opts & LO_RECURSABLE) | LO_WITNESS);
}

void
rm_destroy(struct rmlock *rm)
{

        mtx_destroy(&rm->rm_lock);
        lock_destroy(&rm->lock_object);
}

int
rm_wowned(struct rmlock *rm)
{

        return (mtx_owned(&rm->rm_lock));
}

void
rm_sysinit(void *arg)
{
        struct rm_args *args = arg;

        rm_init(args->ra_rm, args->ra_desc, args->ra_opts);
}
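/*
 * Illustrative usage sketch, disabled with #if 0 so it is never compiled.
 * It shows how a consumer might take the read and write sides of an rmlock.
 * The example_* names are hypothetical; the rm_rlock()/rm_runlock()/
 * rm_wlock()/rm_wunlock() wrappers are assumed to be the macros provided by
 * <sys/rmlock.h> that dispatch to the functions in this file.
 */
#if 0
static struct rmlock example_lock;
static int example_value;

static void
example_init(void)
{

        /* Third argument is the lock_init() opts, e.g. LO_RECURSABLE. */
        rm_init(&example_lock, "example", 0);
}

static int
example_read(void)
{
        struct rm_priotracker tracker;  /* Lives on the reader's stack. */
        int v;

        rm_rlock(&example_lock, &tracker);
        v = example_value;              /* Read-side critical section. */
        rm_runlock(&example_lock, &tracker);
        return (v);
}

static void
example_write(int v)
{

        rm_wlock(&example_lock);        /* Waits for readers on all CPUs. */
        example_value = v;
        rm_wunlock(&example_lock);
}
#endif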
static void
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker)
{
        struct pcpu *pc;
        struct rm_queue *queue;
        struct rm_priotracker *atracker;

        critical_enter();
        pc = pcpu_find(curcpu);

        /* Check if we just need to do a proper critical_exit. */
        if (0 == rm->rm_noreadtoken) {
                critical_exit();
                return;
        }

        /* Remove our tracker from the per-cpu list. */
        rm_tracker_remove(pc, tracker);

        /* Check to see if the IPI granted us the lock after all. */
        if (tracker->rmp_flags) {
                /* Just add back tracker - we hold the lock. */
                rm_tracker_add(pc, tracker);
                critical_exit();
                return;
        }

        /*
         * We allow readers to acquire a lock even if a writer is blocked if
         * the lock is recursive and the reader already holds the lock.
         */
        if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
                /*
                 * Just grant the lock if this thread already has a tracker
                 * for this lock on the per-cpu queue.
                 */
                for (queue = pc->pc_rm_queue.rmq_next;
                    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
                        atracker = (struct rm_priotracker *)queue;
                        if ((atracker->rmp_rmlock == rm) &&
                            (atracker->rmp_thread == tracker->rmp_thread)) {
                                mtx_lock_spin(&rm_spinlock);
                                LIST_INSERT_HEAD(&rm->rm_activeReaders,
                                    tracker, rmp_qentry);
                                tracker->rmp_flags = RMPF_ONQUEUE;
                                mtx_unlock_spin(&rm_spinlock);
                                rm_tracker_add(pc, tracker);
                                critical_exit();
                                return;
                        }
                }
        }

        sched_unpin();
        critical_exit();

        mtx_lock(&rm->rm_lock);
        rm->rm_noreadtoken = 0;
        critical_enter();

        pc = pcpu_find(curcpu);
        rm_tracker_add(pc, tracker);
        sched_pin();
        critical_exit();

        mtx_unlock(&rm->rm_lock);
}

void
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
        struct thread *td = curthread;
        struct pcpu *pc;

        tracker->rmp_flags = 0;
        tracker->rmp_thread = td;
        tracker->rmp_rmlock = rm;

        td->td_critnest++;      /* critical_enter(); */

        compiler_memory_barrier();

        pc = cpuid_to_pcpu[td->td_oncpu];       /* pcpu_find(td->td_oncpu); */

        rm_tracker_add(pc, tracker);

        td->td_pinned++;        /* sched_pin(); */

        compiler_memory_barrier();

        td->td_critnest--;

        /*
         * Fast path to combine two common conditions into a single
         * conditional jump.
         */
        if (0 == (td->td_owepreempt | rm->rm_noreadtoken))
                return;

        /* We do not have a read token and need to acquire one. */
        _rm_rlock_hard(rm, tracker);
}

static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

        if (td->td_owepreempt) {
                td->td_critnest++;
                critical_exit();
        }

        if (!tracker->rmp_flags)
                return;

        mtx_lock_spin(&rm_spinlock);
        LIST_REMOVE(tracker, rmp_qentry);

        if (tracker->rmp_flags & RMPF_SIGNAL) {
                struct rmlock *rm;
                struct turnstile *ts;

                rm = tracker->rmp_rmlock;

                turnstile_chain_lock(&rm->lock_object);
                mtx_unlock_spin(&rm_spinlock);

                ts = turnstile_lookup(&rm->lock_object);

                turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
                turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
                turnstile_chain_unlock(&rm->lock_object);
        } else
                mtx_unlock_spin(&rm_spinlock);
}

void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
        struct pcpu *pc;
        struct thread *td = tracker->rmp_thread;

        td->td_critnest++;      /* critical_enter(); */
        pc = cpuid_to_pcpu[td->td_oncpu];       /* pcpu_find(td->td_oncpu); */
        rm_tracker_remove(pc, tracker);
        td->td_critnest--;
        td->td_pinned--;        /* sched_unpin(); */

        if (0 == (td->td_owepreempt | tracker->rmp_flags))
                return;

        _rm_unlock_hard(td, tracker);
}
void
_rm_wlock(struct rmlock *rm)
{
        struct rm_priotracker *prio;
        struct turnstile *ts;

        mtx_lock(&rm->rm_lock);

        if (rm->rm_noreadtoken == 0) {
                /* Get all read tokens back. */
                rm->rm_noreadtoken = 1;

                /*
                 * Assumes the rm->rm_noreadtoken update is visible on other
                 * CPUs before rm_cleanIPI is called.
                 */
#ifdef SMP
                smp_rendezvous(smp_no_rendevous_barrier,
                    rm_cleanIPI,
                    smp_no_rendevous_barrier,
                    rm);
#else
                rm_cleanIPI(rm);
#endif

                mtx_lock_spin(&rm_spinlock);
                while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
                        ts = turnstile_trywait(&rm->lock_object);
                        prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
                        mtx_unlock_spin(&rm_spinlock);
                        turnstile_wait(ts, prio->rmp_thread,
                            TS_EXCLUSIVE_QUEUE);
                        mtx_lock_spin(&rm_spinlock);
                }
                mtx_unlock_spin(&rm_spinlock);
        }
}

void
_rm_wunlock(struct rmlock *rm)
{

        mtx_unlock(&rm->rm_lock);
}

#ifdef LOCK_DEBUG

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

        WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line, NULL);

        _rm_wlock(rm);

        LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

        WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

        curthread->td_locks++;
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

        curthread->td_locks--;
        WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
        _rm_wunlock(rm);
}

void
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL);

        _rm_rlock(rm, tracker);

        LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line);

        WITNESS_LOCK(&rm->lock_object, 0, file, line);

        curthread->td_locks++;
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        curthread->td_locks--;
        WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
        LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
        _rm_runlock(rm, tracker);
}

#else

/*
 * Just strip out the file and line arguments if no lock debugging is enabled
 * in the kernel; we may be called from a kernel module.  (See the illustrative
 * sketch at the end of this file.)
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

        _rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

        _rm_wunlock(rm);
}

void
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        _rm_rlock(rm, tracker);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        _rm_runlock(rm, tracker);
}

#endif
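/*
 * Illustrative sketch, disabled with #if 0: how the public rm_*lock() macros
 * are assumed to reach the _rm_*_debug() entry points above.  The exact
 * definitions live in <sys/rmlock.h>, and LOCK_FILE/LOCK_LINE come from
 * <sys/lock.h>.  Because callers (including kernel modules) always use the
 * *_debug() symbols, a module built against a kernel without LOCK_DEBUG still
 * links and simply reaches the thin wrappers in the #else branch above.
 */
#if 0
#define rm_wlock(rm)            _rm_wlock_debug((rm), LOCK_FILE, LOCK_LINE)
#define rm_wunlock(rm)          _rm_wunlock_debug((rm), LOCK_FILE, LOCK_LINE)
#define rm_rlock(rm, tracker)                                           \
        _rm_rlock_debug((rm), (tracker), LOCK_FILE, LOCK_LINE)
#define rm_runlock(rm, tracker)                                         \
        _rm_runlock_debug((rm), (tracker), LOCK_FILE, LOCK_LINE)
#endif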