// SPDX-License-Identifier: GPL-2.0
/* kernel/rwsem.c: R/W semaphores, public implementation
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 *
 * Rwsem count bit fields re-definition and rwsem rearchitecture by
 * Waiman Long <longman@redhat.com> and
 * Peter Zijlstra <peterz@infradead.org>.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/export.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <trace/events/lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "lock_events.h"

/*
 * The least significant 2 bits of the owner value have the following
 * meanings when set.
 *  - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
 *  - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
 *
 * When the rwsem is reader-owned and a spinning writer has timed out,
 * the nonspinnable bit will be set to disable optimistic spinning.
 *
 * When a writer acquires a rwsem, it puts its task_struct pointer
 * into the owner field. It is cleared after an unlock.
 *
 * When a reader acquires a rwsem, it will also put its task_struct
 * pointer into the owner field with the RWSEM_READER_OWNED bit set.
 * On unlock, the owner field will largely be left untouched. So
 * for a free or reader-owned rwsem, the owner value may contain
 * information about the last reader that acquired the rwsem.
 *
 * That information may be helpful in debugging cases where the system
 * seems to hang on a reader-owned rwsem, especially if only one reader
 * is involved. Ideally we would like to track all the readers that own
 * a rwsem, but the overhead is simply too big.
 *
 * Fast-path reader optimistic lock stealing is supported when the rwsem
 * was previously owned by a writer and the following conditions are met:
 *  - rwsem is not currently writer owned
 *  - the handoff isn't set.
 */
#define RWSEM_READER_OWNED	(1UL << 0)
#define RWSEM_NONSPINNABLE	(1UL << 1)
#define RWSEM_OWNER_FLAGS_MASK	(RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)

#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem)	do {			\
	if (!debug_locks_silent &&				\
	    WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
		#c, atomic_long_read(&(sem)->count),		\
		(unsigned long) sem->magic,			\
		atomic_long_read(&(sem)->owner), (long)current,	\
		list_empty(&(sem)->wait_list) ? "" : "not "))	\
			debug_locks_off();			\
	} while (0)
#else
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif

/*
 * On 64-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-62 - 55-bit reader count
 * Bit  63   - read fail bit
 *
 * On 32-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-30 - 23-bit reader count
 * Bit  31   - read fail bit
 *
 * It is not likely that the most significant bit (read fail bit) will ever
 * be set. This guard bit is still checked anyway in the down_read()
 * fastpath just in case we need to use up more of the reader bits for
 * other purposes in the future.
 *
 * atomic_long_fetch_add() is used to obtain the reader lock, whereas
 * atomic_long_cmpxchg() will be used to obtain the writer lock.
 *
 * There are three places where the lock handoff bit may be set or cleared.
 * 1) rwsem_mark_wake() for readers		-- set, clear
 * 2) rwsem_try_write_lock() for writers	-- set, clear
 * 3) rwsem_del_waiter()			-- clear
 *
 * For all the above cases, wait_lock will be held. A writer must also
 * be the first one in the wait_list to be eligible for setting the handoff
 * bit. So concurrent setting/clearing of handoff bit is not possible.
 */
#define RWSEM_WRITER_LOCKED	(1UL << 0)
#define RWSEM_FLAG_WAITERS	(1UL << 1)
#define RWSEM_FLAG_HANDOFF	(1UL << 2)
#define RWSEM_FLAG_READFAIL	(1UL << (BITS_PER_LONG - 1))

#define RWSEM_READER_SHIFT	8
#define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)
#define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))
#define RWSEM_WRITER_MASK	RWSEM_WRITER_LOCKED
#define RWSEM_LOCK_MASK		(RWSEM_WRITER_MASK|RWSEM_READER_MASK)
#define RWSEM_READ_FAILED_MASK	(RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
				 RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)

/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without lock. Reads from owner, however,
 * may not need READ_ONCE() as long as the pointer value is only used
 * for comparison and isn't being dereferenced.
 *
 * Both rwsem_{set,clear}_owner() functions should be in the same
 * preempt disable section as the atomic op that changes sem->count.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
	lockdep_assert_preemption_disabled();
	atomic_long_set(&sem->owner, (long)current);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
	lockdep_assert_preemption_disabled();
	atomic_long_set(&sem->owner, 0);
}

/*
 * Test the flags in the owner field.
 */
static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
{
	return atomic_long_read(&sem->owner) & flags;
}

/*
 * The task_struct pointer of the last owning reader will be left in
 * the owner field.
 *
 * Note that the owner value just indicates the task has owned the rwsem
 * previously, it may not be the real owner or one of the real owners
 * anymore when that field is examined, so take it with a grain of salt.
 *
 * The reader non-spinnable bit is preserved.
 */
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
		(atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);

	atomic_long_set(&sem->owner, val);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
	__rwsem_set_reader_owned(sem, current);
}

/*
 * Return true if the rwsem is owned by a reader.
 */
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
#ifdef CONFIG_DEBUG_RWSEMS
	/*
	 * Check the count to see if it is write-locked.
	 */
	long count = atomic_long_read(&sem->count);

	if (count & RWSEM_WRITER_MASK)
		return false;
#endif
	return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
}

#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
 * is a task pointer in the owner field of a reader-owned rwsem, it is
 * the real owner or one of the real owners. The only exception is when
 * the unlock is done by up_read_non_owner().
 */
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
	unsigned long val = atomic_long_read(&sem->owner);

	while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
		if (atomic_long_try_cmpxchg(&sem->owner, &val,
					    val & RWSEM_OWNER_FLAGS_MASK))
			return;
	}
}
#else
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif

/*
 * Set the RWSEM_NONSPINNABLE bit if the RWSEM_READER_OWNED flag
 * remains set. Otherwise, the operation will be aborted.
 */
static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	do {
		if (!(owner & RWSEM_READER_OWNED))
			break;
		if (owner & RWSEM_NONSPINNABLE)
			break;
	} while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
					  owner | RWSEM_NONSPINNABLE));
}

static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
{
	*cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);

	if (WARN_ON_ONCE(*cntp < 0))
		rwsem_set_nonspinnable(sem);

	if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
		rwsem_set_reader_owned(sem);
		return true;
	}

	return false;
}

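/*
 * Added note: rwsem_read_trylock() optimistically adds the reader bias
 * first and only checks for conflicts afterwards. On failure the bias is
 * left in place and the slowpath is responsible for backing it out. A
 * sketch of the caller-side pattern (this is what __down_read_common()
 * further below does):
 *
 *	long count;
 *
 *	if (rwsem_read_trylock(sem, &count))
 *		return 0;	// fast path, lock acquired
 *	// slow path: may undo the bias, or sleep until granted the lock
 *	if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
 *		return -EINTR;
 */
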
static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;
	bool ret = false;

	preempt_disable();
	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
		rwsem_set_owner(sem);
		ret = true;
	}

	preempt_enable();
	return ret;
}

/*
 * Return just the real task structure pointer of the owner
 */
static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
{
	return (struct task_struct *)
		(atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
}

/*
 * Return the real task structure pointer of the owner and the embedded
 * flags in the owner. pflags must be non-NULL.
 */
static inline struct task_struct *
rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	*pflags = owner & RWSEM_OWNER_FLAGS_MASK;
	return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
}

/*
 * Guide to the rw_semaphore's count field.
 *
 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
 * by a writer.
 *
 * The lock is owned by readers when
 * (1) the RWSEM_WRITER_LOCKED isn't set in count,
 * (2) some of the reader bits are set in count, and
 * (3) the owner field has the RWSEM_READER_OWNED bit set.
 *
 * Having some reader bits set is not enough to guarantee a reader-owned
 * lock as the readers may be in the process of backing out from the count
 * and a writer has just released the lock. So another writer may steal
 * the lock immediately after that.
 */

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
#ifdef CONFIG_DEBUG_RWSEMS
	sem->magic = sem;
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
	atomic_long_set(&sem->owner, 0L);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	osq_lock_init(&sem->osq);
#endif
}
EXPORT_SYMBOL(__init_rwsem);

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
	unsigned long timeout;
	bool handoff_set;
};
#define rwsem_first_waiter(sem) \
	list_first_entry(&sem->wait_list, struct rwsem_waiter, list)

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * The typical HZ value is either 250 or 1000. So set the minimum waiting
 * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
 * queue before initiating the handoff protocol.
 */
#define RWSEM_WAIT_TIMEOUT	DIV_ROUND_UP(HZ, 250)

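/*
 * Worked arithmetic (added for clarity): with HZ=1000 the timeout is
 * DIV_ROUND_UP(1000, 250) = 4 jiffies = 4ms; with HZ=250 it is 1 jiffy,
 * also 4ms. For a lower tick rate such as HZ=100 it is still 1 jiffy,
 * which then corresponds to 10ms.
 */
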
/*
 * Magic number to batch-wakeup waiting readers, even when writers are
 * also present in the queue. This both limits the amount of work the
 * waking thread must do and also prevents any potential counter overflow,
 * however unlikely.
 */
#define MAX_READERS_WAKEUP	0x100

static inline void
rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
{
	lockdep_assert_held(&sem->wait_lock);
	list_add_tail(&waiter->list, &sem->wait_list);
	/* caller will set RWSEM_FLAG_WAITERS */
}

/*
 * Remove a waiter from the wait_list and clear flags.
 *
 * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of
 * this function. Modify with care.
 *
 * Return: true if wait_list isn't empty and false otherwise
 */
static inline bool
rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
{
	lockdep_assert_held(&sem->wait_lock);
	list_del(&waiter->list);
	if (likely(!list_empty(&sem->wait_list)))
		return true;

	atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
	return false;
}

/*
 * Handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
 *   have been set.
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wakeup the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after their task
 *   pointers have been zeroed
 * - writers are only marked woken if downgrading is false
 *
 * Implies rwsem_del_waiter() for all woken readers.
 */
static void rwsem_mark_wake(struct rw_semaphore *sem,
			    enum rwsem_wake_type wake_type,
			    struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;
	struct list_head wlist;

	lockdep_assert_held(&sem->wait_lock);

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = rwsem_first_waiter(sem);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
			lockevent_inc(rwsem_wake_writer);
		}

		return;
	}

	/*
	 * No reader wakeup if there are too many of them already.
	 */
	if (unlikely(atomic_long_read(&sem->count) < 0))
		return;

	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		struct task_struct *owner;

		adjustment = RWSEM_READER_BIAS;
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
			/*
			 * When we've been waiting "too" long (for writers
			 * to give up the lock), request a HANDOFF to
			 * force the issue.
			 */
			if (time_after(jiffies, waiter->timeout)) {
				if (!(oldcount & RWSEM_FLAG_HANDOFF)) {
					adjustment -= RWSEM_FLAG_HANDOFF;
					lockevent_inc(rwsem_rlock_handoff);
				}
				waiter->handoff_set = true;
			}

			atomic_long_add(-adjustment, &sem->count);
			return;
		}
		/*
		 * Set it to reader-owned to give spinners an early
		 * indication that readers now have the lock.
		 * The reader nonspinnable bit seen at slowpath entry of
		 * the reader is copied over.
		 */
		owner = waiter->task;
		__rwsem_set_reader_owned(sem, owner);
	}

	/*
	 * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
	 * queue. We know that the woken count will be at least 1 as we
	 * accounted for above. Note we increment the 'active part' of the
	 * count by the number of readers before waking any processes up.
	 *
	 * This is an adaptation of the phase-fair R/W locks where at the
	 * reader phase (first waiter is a reader), all readers are eligible
	 * to acquire the lock at the same time irrespective of their order
	 * in the queue. The writers acquire the lock according to their
	 * order in the queue.
	 *
	 * We have to do wakeup in 2 passes to prevent the possibility that
	 * the reader count may be decremented before it is incremented. It
	 * is because the to-be-woken waiter may not have slept yet. So it
	 * may see waiter->task got cleared, finish its critical section and
	 * do an unlock before the reader count increment.
	 *
	 * 1) Collect the read-waiters in a separate list, count them and
	 *    fully increment the reader count in rwsem.
	 * 2) For each waiter in the new list, clear waiter->task and
	 *    put them into wake_q to be woken up later.
	 */
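	/*
	 * Added illustration of the race the 2-pass scheme prevents: if
	 * waiter->task were cleared before the reader bias is added, the
	 * woken reader (which may never have slept) could enter and leave
	 * its critical section and apply its -RWSEM_READER_BIAS unlock
	 * before our +RWSEM_READER_BIAS, transiently underflowing the
	 * reader count.
	 */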
	INIT_LIST_HEAD(&wlist);
	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			continue;

		woken++;
		list_move_tail(&waiter->list, &wlist);

		/*
		 * Limit # of readers that can be woken up per wakeup call.
		 */
		if (unlikely(woken >= MAX_READERS_WAKEUP))
			break;
	}

	adjustment = woken * RWSEM_READER_BIAS - adjustment;
	lockevent_cond_inc(rwsem_wake_reader, woken);

	oldcount = atomic_long_read(&sem->count);
	if (list_empty(&sem->wait_list)) {
		/*
		 * Combined with list_move_tail() above, this implies
		 * rwsem_del_waiter().
		 */
		adjustment -= RWSEM_FLAG_WAITERS;
		if (oldcount & RWSEM_FLAG_HANDOFF)
			adjustment -= RWSEM_FLAG_HANDOFF;
	} else if (woken) {
		/*
		 * When we've woken a reader, we no longer need to force
		 * writers to give up the lock and we can clear HANDOFF.
		 */
		if (oldcount & RWSEM_FLAG_HANDOFF)
			adjustment -= RWSEM_FLAG_HANDOFF;
	}

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);

	/* 2nd pass */
	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
		struct task_struct *tsk;

		tsk = waiter->task;
		get_task_struct(tsk);

		/*
		 * Ensure calling get_task_struct() before setting the reader
		 * waiter to nil such that rwsem_down_read_slowpath() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);
		/*
		 * Ensure issuing the wakeup (either by us or someone else)
		 * after setting the reader waiter to nil.
		 */
		wake_q_add_safe(wake_q, tsk);
	}
}

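/*
 * Worked example (added for clarity): with a wait queue of
 *
 *	R1 -> W1 -> R2 -> R3
 *
 * and a reader at the head, rwsem_mark_wake() moves R1, R2 and R3 onto
 * the local wake list and grants all three read locks in one batch,
 * while W1 stays queued and will acquire the lock in queue order once
 * the readers drain. This is the phase-fair behaviour described above.
 */
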
/*
 * Remove a waiter and try to wake up other waiters in the wait queue
 * This function is called from the out_nolock path of both the reader and
 * writer slowpaths with wait_lock held. It releases the wait_lock and
 * optionally wakes up waiters before it returns.
 */
static inline void
rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
		      struct wake_q_head *wake_q)
		      __releases(&sem->wait_lock)
{
	bool first = rwsem_first_waiter(sem) == waiter;

	wake_q_init(wake_q);

	/*
	 * If the wait_list isn't empty and the waiter to be deleted is
	 * the first waiter, we wake up the remaining waiters as they may
	 * be eligible to acquire or spin on the lock.
	 */
	if (rwsem_del_waiter(sem, waiter) && first)
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	if (!wake_q_empty(wake_q))
		wake_up_q(wake_q);
}

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 *
 * Implies rwsem_del_waiter() on success.
 */
static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
					struct rwsem_waiter *waiter)
{
	struct rwsem_waiter *first = rwsem_first_waiter(sem);
	long count, new;

	lockdep_assert_held(&sem->wait_lock);

	count = atomic_long_read(&sem->count);
	do {
		bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);

		if (has_handoff) {
			/*
			 * Honor handoff bit and yield only when the first
			 * waiter is the one that set it. Otherwise, we
			 * still try to acquire the rwsem.
			 */
			if (first->handoff_set && (waiter != first))
				return false;

			/*
			 * First waiter can inherit a previously set handoff
			 * bit and spin on rwsem if lock acquisition fails.
			 */
			if (waiter == first)
				waiter->handoff_set = true;
		}

		new = count;

		if (count & RWSEM_LOCK_MASK) {
			if (has_handoff || (!rt_task(waiter->task) &&
					    !time_after(jiffies, waiter->timeout)))
				return false;

			new |= RWSEM_FLAG_HANDOFF;
		} else {
			new |= RWSEM_WRITER_LOCKED;
			new &= ~RWSEM_FLAG_HANDOFF;

			if (list_is_singular(&sem->wait_list))
				new &= ~RWSEM_FLAG_WAITERS;
		}
	} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));

	/*
	 * We have either acquired the lock with handoff bit cleared or
	 * set the handoff bit.
	 */
	if (new & RWSEM_FLAG_HANDOFF) {
		waiter->handoff_set = true;
		lockevent_inc(rwsem_wlock_handoff);
		return false;
	}

	/*
	 * Have rwsem_try_write_lock() fully imply rwsem_del_waiter() on
	 * success.
	 */
	list_del(&waiter->list);
	rwsem_set_owner(sem);
	return true;
}

/*
 * The rwsem_spin_on_owner() function returns the following 4 values
 * depending on the lock owner state.
 *   OWNER_NULL  : owner is currently NULL
 *   OWNER_WRITER: when owner changes and is a writer
 *   OWNER_READER: when owner changes and the new owner may be a reader.
 *   OWNER_NONSPINNABLE:
 *		   when optimistic spinning has to stop because either the
 *		   owner stops running, is unknown, or its timeslice has
 *		   been used up.
 */
enum owner_state {
	OWNER_NULL		= 1 << 0,
	OWNER_WRITER		= 1 << 1,
	OWNER_READER		= 1 << 2,
	OWNER_NONSPINNABLE	= 1 << 3,
};

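/*
 * Added note: the owner states are one-hot bits rather than sequential
 * values so that a set of states can be tested with a single mask. For
 * example, OWNER_SPINNABLE below is
 *
 *	OWNER_NULL | OWNER_WRITER | OWNER_READER = 0b0111
 *
 * so "owner_state & OWNER_SPINNABLE" is true for any of those three
 * states and false only for OWNER_NONSPINNABLE (0b1000).
 */
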
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
						    count | RWSEM_WRITER_LOCKED)) {
			rwsem_set_owner(sem);
			lockevent_inc(rwsem_opt_lock);
			return true;
		}
	}
	return false;
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	unsigned long flags;
	bool ret = true;

	if (need_resched()) {
		lockevent_inc(rwsem_opt_fail);
		return false;
	}

	preempt_disable();
	/*
	 * Disabling preemption is equivalent to an RCU read-side critical
	 * section, thus the task_struct won't go away.
	 */
	owner = rwsem_owner_flags(sem, &flags);
	/*
	 * Don't check the read-owner as the entry may be stale.
	 */
	if ((flags & RWSEM_NONSPINNABLE) ||
	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
		ret = false;
	preempt_enable();

	lockevent_cond_inc(rwsem_opt_fail, !ret);
	return ret;
}

#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER | OWNER_READER)

static inline enum owner_state
rwsem_owner_state(struct task_struct *owner, unsigned long flags)
{
	if (flags & RWSEM_NONSPINNABLE)
		return OWNER_NONSPINNABLE;

	if (flags & RWSEM_READER_OWNED)
		return OWNER_READER;

	return owner ? OWNER_WRITER : OWNER_NULL;
}

static noinline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *new, *owner;
	unsigned long flags, new_flags;
	enum owner_state state;

	lockdep_assert_preemption_disabled();

	owner = rwsem_owner_flags(sem, &flags);
	state = rwsem_owner_state(owner, flags);
	if (state != OWNER_WRITER)
		return state;

	for (;;) {
		/*
		 * When a waiting writer set the handoff flag, it may spin
		 * on the owner as well. Once that writer acquires the lock,
		 * we can spin on it. So we don't need to quit even when the
		 * handoff bit is set.
		 */
		new = rwsem_owner_flags(sem, &new_flags);
		if ((new != owner) || (new_flags != flags)) {
			state = rwsem_owner_state(new, new_flags);
			break;
		}

		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking sem->owner still matches owner. If that fails,
		 * owner might point to free()d memory. If it still matches,
		 * our spinning context has already disabled preemption,
		 * which is equivalent to an RCU read-side critical section
		 * and ensures the memory stays valid.
		 */
		barrier();

		if (need_resched() || !owner_on_cpu(owner)) {
			state = OWNER_NONSPINNABLE;
			break;
		}

		cpu_relax();
	}

	return state;
}

/*
 * Calculate reader-owned rwsem spinning threshold for writer
 *
 * The more readers own the rwsem, the longer it will take for them to
 * wind down and free the rwsem. So the empirical formula used to
 * determine the actual spinning time limit here is:
 *
 *   Spinning threshold = (10 + nr_readers/2)us
 *
 * The limit is capped to a maximum of 25us (30 readers). This is just
 * a heuristic and is subject to change in the future.
 */
static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);
	int readers = count >> RWSEM_READER_SHIFT;
	u64 delta;

	if (readers > 30)
		readers = 30;
	delta = (20 + readers) * NSEC_PER_USEC / 2;

	return sched_clock() + delta;
}

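/*
 * Worked numbers (added for clarity): (20 + readers) * NSEC_PER_USEC / 2
 * is the nanosecond form of the (10 + nr_readers/2)us formula above.
 * With 10 readers, delta = (20 + 10) * 1000 / 2 = 15000ns = 15us; at
 * the 30-reader cap, delta = (20 + 30) * 1000 / 2 = 25000ns = 25us.
 */
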
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	bool taken = false;
	int prev_owner_state = OWNER_NULL;
	int loop = 0;
	u64 rspin_threshold = 0;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!osq_lock(&sem->osq))
		goto done;

	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock and spinning time has exceeded limit.
	 */
	for (;;) {
		enum owner_state owner_state;

		owner_state = rwsem_spin_on_owner(sem);
		if (!(owner_state & OWNER_SPINNABLE))
			break;

		/*
		 * Try to acquire the lock
		 */
		taken = rwsem_try_write_lock_unqueued(sem);

		if (taken)
			break;

		/*
		 * Time-based reader-owned rwsem optimistic spinning
		 */
		if (owner_state == OWNER_READER) {
			/*
			 * Re-initialize rspin_threshold every time when
			 * the owner state changes from non-reader to reader.
			 * This allows a writer to steal the lock in between
			 * 2 reader phases and have the threshold reset at
			 * the beginning of the 2nd reader phase.
			 */
			if (prev_owner_state != OWNER_READER) {
				if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
					break;
				rspin_threshold = rwsem_rspin_threshold(sem);
				loop = 0;
			}

			/*
			 * Check time threshold once every 16 iterations to
			 * avoid calling sched_clock() too frequently so
			 * as to reduce the average latency between the times
			 * when the lock becomes free and when the spinner
			 * is ready to do a trylock.
			 */
			else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
				rwsem_set_nonspinnable(sem);
				lockevent_inc(rwsem_opt_nospin);
				break;
			}
		}

		/*
		 * An RT task cannot do optimistic spinning if it cannot
		 * be sure the lock holder is running or live-lock may
		 * happen if the current task and the lock holder happen
		 * to run on the same CPU. However, aborting optimistic
		 * spinning while a NULL owner is detected may miss some
		 * opportunity where spinning can continue without causing
		 * problem.
		 *
		 * There are 2 possible cases where an RT task may be able
		 * to continue spinning.
		 *
		 * 1) The lock owner is in the process of releasing the
		 *    lock, sem->owner is cleared but the lock has not
		 *    been released yet.
		 * 2) The lock was free and owner cleared, but another
		 *    task just comes in and acquires the lock before
		 *    we try to get it. The new owner may be a spinnable
		 *    writer.
		 *
		 * To take advantage of the two scenarios listed above, the
		 * RT task is made to retry one more time to see if it can
		 * acquire the lock or continue spinning on the new owning
		 * writer. Of course, if the time lag is long enough or the
		 * new owner is not a writer or not spinnable, the RT task
		 * will quit spinning.
		 *
		 * If the owner is a writer, the need_resched() check is
		 * done inside rwsem_spin_on_owner(). If the owner is not
		 * a writer, the need_resched() check needs to be done here.
		 */
		if (owner_state != OWNER_WRITER) {
			if (need_resched())
				break;
			if (rt_task(current) &&
			    (prev_owner_state != OWNER_WRITER))
				break;
		}
		prev_owner_state = owner_state;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	lockevent_cond_inc(rwsem_opt_fail, !taken);
	return taken;
}

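/*
 * Added note: osq_lock() above queues concurrent spinners on an MCS-like
 * "optimistic spin queue" so that only one writer at a time actually
 * spins on sem->count and sem->owner; the others spin locally on their
 * own queue node, which avoids cacheline bouncing on the rwsem itself.
 */
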
/*
 * Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should
 * only be called when the reader count reaches 0.
 */
static inline void clear_nonspinnable(struct rw_semaphore *sem)
{
	if (unlikely(rwsem_test_oflags(sem, RWSEM_NONSPINNABLE)))
		atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner);
}

#else
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline void clear_nonspinnable(struct rw_semaphore *sem) { }

static inline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	return OWNER_NONSPINNABLE;
}
#endif

/*
 * Prepare to wake up waiter(s) in the wait queue by putting them into the
 * given wake_q if the rwsem lock owner isn't a writer. If rwsem is likely
 * reader-owned, wake up the read lock waiters at the front of the queue;
 * otherwise wake up any front waiter.
 *
 * This is being called from both reader and writer slow paths.
 */
static inline void rwsem_cond_wake_waiter(struct rw_semaphore *sem, long count,
					  struct wake_q_head *wake_q)
{
	enum rwsem_wake_type wake_type;

	if (count & RWSEM_WRITER_MASK)
		return;

	if (count & RWSEM_READER_MASK) {
		wake_type = RWSEM_WAKE_READERS;
	} else {
		wake_type = RWSEM_WAKE_ANY;
		clear_nonspinnable(sem);
	}
	rwsem_mark_wake(sem, wake_type, wake_q);
}

/*
 * Wait for the read lock to be granted
 */
static struct rw_semaphore __sched *
rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state)
{
	long adjustment = -RWSEM_READER_BIAS;
	long rcnt = (count >> RWSEM_READER_SHIFT);
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);

	/*
	 * To prevent a constant stream of readers from starving a sleeping
	 * waiter, don't attempt optimistic lock stealing if the lock is
	 * currently owned by readers.
	 */
	if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
	    (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED))
		goto queue;

	/*
	 * Reader optimistic lock stealing.
	 */
	if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) {
		rwsem_set_reader_owned(sem);
		lockevent_inc(rwsem_rlock_steal);

		/*
		 * Wake up other readers in the wait queue if it is
		 * the first reader.
		 */
		if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (!list_empty(&sem->wait_list))
				rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
						&wake_q);
			raw_spin_unlock_irq(&sem->wait_lock);
			wake_up_q(&wake_q);
		}
		return sem;
	}

queue:
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;
	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
	waiter.handoff_set = false;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list)) {
		/*
		 * In case the wait queue is empty and the lock isn't owned
		 * by a writer, this reader can exit the slowpath and return
		 * immediately as its RWSEM_READER_BIAS has already been set
		 * in the count.
		 */
		if (!(atomic_long_read(&sem->count) & RWSEM_WRITER_MASK)) {
			/* Provide lock ACQUIRE */
			smp_acquire__after_ctrl_dep();
			raw_spin_unlock_irq(&sem->wait_lock);
			rwsem_set_reader_owned(sem);
			lockevent_inc(rwsem_rlock_fast);
			return sem;
		}
		adjustment += RWSEM_FLAG_WAITERS;
	}
	rwsem_add_waiter(sem, &waiter);

	/* we're now waiting on the lock, but no longer actively locking */
	count = atomic_long_add_return(adjustment, &sem->count);

	rwsem_cond_wake_waiter(sem, count, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);

	if (!wake_q_empty(&wake_q))
		wake_up_q(&wake_q);

	trace_contention_begin(sem, LCB_F_READ);

	/* wait to be given the lock */
	for (;;) {
		set_current_state(state);
		if (!smp_load_acquire(&waiter.task)) {
			/* Matches rwsem_mark_wake()'s smp_store_release(). */
			break;
		}
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
			break;
		}
		schedule();
		lockevent_inc(rwsem_sleep_reader);
	}

	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock);
	trace_contention_end(sem, 0);
	return sem;

out_nolock:
	rwsem_del_wake_waiter(sem, &waiter, &wake_q);
	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock_fail);
	trace_contention_end(sem, -EINTR);
	return ERR_PTR(-EINTR);
}

/*
 * Wait until we successfully acquire the write lock
 */
static struct rw_semaphore __sched *
rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
{
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
		/* rwsem_optimistic_spin() implies ACQUIRE on success */
		return sem;
	}

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;
	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
	waiter.handoff_set = false;

	raw_spin_lock_irq(&sem->wait_lock);
	rwsem_add_waiter(sem, &waiter);

	/* we're now waiting on the lock */
	if (rwsem_first_waiter(sem) != &waiter) {
		rwsem_cond_wake_waiter(sem, atomic_long_read(&sem->count),
				       &wake_q);
		if (!wake_q_empty(&wake_q)) {
			/*
			 * We want to minimize wait_lock hold time especially
			 * when a large number of readers are to be woken up.
			 */
			raw_spin_unlock_irq(&sem->wait_lock);
			wake_up_q(&wake_q);
			raw_spin_lock_irq(&sem->wait_lock);
		}
	} else {
		atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
	}

	/* wait until we successfully acquire the lock */
	set_current_state(state);
	trace_contention_begin(sem, LCB_F_WRITE);

	for (;;) {
		if (rwsem_try_write_lock(sem, &waiter)) {
			/* rwsem_try_write_lock() implies ACQUIRE on success */
			break;
		}

		raw_spin_unlock_irq(&sem->wait_lock);

		if (signal_pending_state(state, current))
			goto out_nolock;

		/*
		 * After setting the handoff bit and failing to acquire
		 * the lock, attempt to spin on owner to accelerate lock
		 * transfer. If the previous owner is an on-CPU writer and it
		 * has just released the lock, OWNER_NULL will be returned.
		 * In this case, we attempt to acquire the lock again
		 * without sleeping.
		 */
		if (waiter.handoff_set) {
			enum owner_state owner_state;

			preempt_disable();
			owner_state = rwsem_spin_on_owner(sem);
			preempt_enable();

			if (owner_state == OWNER_NULL)
				goto trylock_again;
		}

		schedule();
		lockevent_inc(rwsem_sleep_writer);
		set_current_state(state);
trylock_again:
		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	raw_spin_unlock_irq(&sem->wait_lock);
	lockevent_inc(rwsem_wlock);
	trace_contention_end(sem, 0);
	return sem;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	rwsem_del_wake_waiter(sem, &waiter, &wake_q);
	lockevent_inc(rwsem_wlock_fail);
	trace_contention_end(sem, -EINTR);
	return ERR_PTR(-EINTR);
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}

/*
 * lock for reading
 */
static inline int __down_read_common(struct rw_semaphore *sem, int state)
{
	long count;

	if (!rwsem_read_trylock(sem, &count)) {
		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
			return -EINTR;
		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	}
	return 0;
}

static inline void __down_read(struct rw_semaphore *sem)
{
	__down_read_common(sem, TASK_UNINTERRUPTIBLE);
}

static inline int __down_read_interruptible(struct rw_semaphore *sem)
{
	return __down_read_common(sem, TASK_INTERRUPTIBLE);
}

static inline int __down_read_killable(struct rw_semaphore *sem)
{
	return __down_read_common(sem, TASK_KILLABLE);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);

	tmp = atomic_long_read(&sem->count);
	while (!(tmp & RWSEM_READ_FAILED_MASK)) {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
						    tmp + RWSEM_READER_BIAS)) {
			rwsem_set_reader_owned(sem);
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline int __down_write_common(struct rw_semaphore *sem, int state)
{
	if (unlikely(!rwsem_write_trylock(sem))) {
		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
			return -EINTR;
	}

	return 0;
}

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
	return __down_write_common(sem, TASK_KILLABLE);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
	return rwsem_write_trylock(sem);
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);

	rwsem_clear_reader_owned(sem);
	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
	DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
	if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
		      RWSEM_FLAG_WAITERS)) {
		clear_nonspinnable(sem);
		rwsem_wake(sem);
	}
}

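/*
 * Worked example (added for clarity): the wakeup test above fires only
 * when the last lock holder leaves while waiters are queued. If the
 * count is 0x202 (two readers, waiters queued), the release leaves
 * tmp = 0x102 and no wakeup happens because a reader still holds the
 * lock. If the count is 0x102, the release leaves tmp = 0x2, which is
 * exactly RWSEM_FLAG_WAITERS, so rwsem_wake() is called.
 */
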
/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
	/*
	 * sem->owner may differ from current if the ownership is transferred
	 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bit.
	 */
	DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
			    !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);

	preempt_disable();
	rwsem_clear_owner(sem);
	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
	preempt_enable();
	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
		rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
	tmp = atomic_long_fetch_add_release(
		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
	rwsem_set_reader_owned(sem);
	if (tmp & RWSEM_FLAG_WAITERS)
		rwsem_downgrade_wake(sem);
}

#else /* !CONFIG_PREEMPT_RT */

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"

#define rwbase_set_and_save_current_state(state)	\
	set_current_state(state)

#define rwbase_restore_current_state()			\
	__set_current_state(TASK_RUNNING)

#define rwbase_rtmutex_lock_state(rtm, state)		\
	__rt_mutex_lock(rtm, state)

#define rwbase_rtmutex_slowlock_locked(rtm, state)	\
	__rt_mutex_slowlock_locked(rtm, NULL, state)

#define rwbase_rtmutex_unlock(rtm)			\
	__rt_mutex_unlock(rtm)

#define rwbase_rtmutex_trylock(rtm)			\
	__rt_mutex_trylock(rtm)

#define rwbase_signal_pending_state(state, current)	\
	signal_pending_state(state, current)

#define rwbase_schedule()				\
	schedule()

#include "rwbase_rt.c"

void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
	init_rwbase_rt(&(sem)->rwbase);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
}
EXPORT_SYMBOL(__init_rwsem);

static inline void __down_read(struct rw_semaphore *sem)
{
	rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
}

static inline int __down_read_interruptible(struct rw_semaphore *sem)
{
	return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);
}

static inline int __down_read_killable(struct rw_semaphore *sem)
{
	return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	return rwbase_read_trylock(&sem->rwbase);
}

static inline void __up_read(struct rw_semaphore *sem)
{
	rwbase_read_unlock(&sem->rwbase, TASK_NORMAL);
}

static inline void __sched __down_write(struct rw_semaphore *sem)
{
	rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
}

static inline int __sched __down_write_killable(struct rw_semaphore *sem)
{
	return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	return rwbase_write_trylock(&sem->rwbase);
}

static inline void __up_write(struct rw_semaphore *sem)
{
	rwbase_write_unlock(&sem->rwbase);
}

static inline void __downgrade_write(struct rw_semaphore *sem)
{
	rwbase_write_downgrade(&sem->rwbase);
}

/* Debug stubs for the common API */
#define DEBUG_RWSEMS_WARN_ON(c, sem)

static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
}

static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
	int count = atomic_read(&sem->rwbase.readers);

	return count < 0 && count != READER_BIAS;
}

#endif /* CONFIG_PREEMPT_RT */

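/*
 * Usage sketch (added illustration; 'my_lock' and the data it protects
 * are hypothetical): the public API below is used like
 *
 *	static DECLARE_RWSEM(my_lock);
 *
 *	down_read(&my_lock);		// shared: many readers may enter
 *	... read the protected data ...
 *	up_read(&my_lock);
 *
 *	down_write(&my_lock);		// exclusive: one writer only
 *	... modify the protected data ...
 *	up_write(&my_lock);
 *
 * The _interruptible/_killable variants return -EINTR instead of
 * blocking indefinitely when a (fatal) signal is pending.
 */
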
/*
 * lock for reading
 */
void __sched down_read(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read);

int __sched down_read_interruptible(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_interruptible);

int __sched down_read_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_killable);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int down_read_trylock(struct rw_semaphore *sem)
{
	int ret = __down_read_trylock(sem);

	if (ret == 1)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(down_read_trylock);

/*
 * lock for writing
 */
void __sched down_write(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write);

/*
 * lock for writing
 */
int __sched down_write_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int down_write_trylock(struct rw_semaphore *sem)
{
	int ret = __down_write_trylock(sem);

	if (ret == 1)
		rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(down_write_trylock);

/*
 * release a read lock
 */
void up_read(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read);

/*
 * release a write lock
 */
void up_write(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);
	__up_write(sem);
}
EXPORT_SYMBOL(up_write);

/*
 * downgrade write lock to read lock
 */
void downgrade_write(struct rw_semaphore *sem)
{
	lock_downgrade(&sem->dep_map, _RET_IP_);
	__downgrade_write(sem);
}
EXPORT_SYMBOL(downgrade_write);

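/*
 * Usage sketch for downgrade_write() (added illustration; 'my_lock' is
 * hypothetical): a writer that has finished updating can let readers in
 * without a fully unlocked window in between:
 *
 *	down_write(&my_lock);
 *	... modify data ...
 *	downgrade_write(&my_lock);	// now held for read, atomically
 *	... keep reading; other readers may run concurrently ...
 *	up_read(&my_lock);
 */
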
#ifdef CONFIG_DEBUG_LOCK_ALLOC

void down_read_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read_nested);

int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_killable_nested);

void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
{
	might_sleep();
	rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(_down_write_nest_lock);

void down_read_non_owner(struct rw_semaphore *sem)
{
	might_sleep();
	__down_read(sem);
	__rwsem_set_reader_owned(sem, NULL);
}
EXPORT_SYMBOL(down_read_non_owner);

void down_write_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write_nested);

int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable_nested);

void up_read_non_owner(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read_non_owner);

#endif