// SPDX-License-Identifier: GPL-2.0
/* kernel/rwsem.c: R/W semaphores, public implementation
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 *
 * Rwsem count bit fields re-definition and rwsem rearchitecture by
 * Waiman Long <longman@redhat.com> and
 * Peter Zijlstra <peterz@infradead.org>.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/export.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>

#include "rwsem.h"
#include "lock_events.h"

/*
 * The least significant 3 bits of the owner value have the following
 * meanings when set.
 * - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
 * - Bit 1: RWSEM_RD_NONSPINNABLE - Readers cannot spin on this lock.
 * - Bit 2: RWSEM_WR_NONSPINNABLE - Writers cannot spin on this lock.
 *
 * When the rwsem is either owned by an anonymous writer, or it is
 * reader-owned, but a spinning writer has timed out, both nonspinnable
 * bits will be set to disable optimistic spinning by readers and writers.
 * In the latter case, the last unlocking reader should then check the
 * writer nonspinnable bit and clear it only to give writers preference
 * to acquire the lock via optimistic spinning, but not readers. A similar
 * action is also done in the reader slowpath.
 *
 * When a writer acquires a rwsem, it puts its task_struct pointer
 * into the owner field. It is cleared after an unlock.
 *
 * When a reader acquires a rwsem, it will also put its task_struct
 * pointer into the owner field with the RWSEM_READER_OWNED bit set.
 * On unlock, the owner field will largely be left untouched. So
 * for a free or reader-owned rwsem, the owner value may contain
 * information about the last reader that acquired the rwsem.
 *
 * That information may be helpful in debugging cases where the system
 * seems to hang on a reader-owned rwsem especially if only one reader
 * is involved. Ideally we would like to track all the readers that own
 * a rwsem, but the overhead is simply too big.
 *
 * Reader optimistic spinning is helpful when the reader critical section
 * is short and there aren't that many readers around. It makes readers
 * relatively more preferred than writers. When a writer times out spinning
 * on a reader-owned lock and sets the nonspinnable bits, there are two main
 * reasons for that.
 *
 * 1) The reader critical section is long, perhaps the task sleeps after
 *    acquiring the read lock.
 * 2) There are just too many readers contending the lock causing it to
 *    take a while to service all of them.
 *
 * In the former case, a long reader critical section will impede the
 * progress of writers which is usually more important for system
 * performance. In the latter case, reader optimistic spinning tends to
 * make the reader groups that contain readers that acquire the lock
 * together smaller, leading to more of them. That may hurt performance
 * in some cases.
 * In other words, the setting of nonspinnable bits indicates that reader
 * optimistic spinning may not be helpful for those workloads that cause
 * it.
 *
 * Therefore, any writers that had observed the setting of the writer
 * nonspinnable bit for a given rwsem after they fail to acquire the lock
 * via optimistic spinning will set the reader nonspinnable bit once they
 * acquire the write lock. Similarly, readers that observe the setting
 * of reader nonspinnable bit at slowpath entry will set the reader
 * nonspinnable bits when they acquire the read lock via the wakeup path.
 *
 * Once the reader nonspinnable bit is on, it will only be reset when
 * a writer is able to acquire the rwsem in the fast path or somehow a
 * reader or writer in the slowpath doesn't observe the nonspinnable bit.
 *
 * This is to discourage reader optimistic spinning on that particular
 * rwsem and make writers more preferred. This adaptive disabling of reader
 * optimistic spinning will alleviate the negative side effect of this
 * feature.
 */
#define RWSEM_READER_OWNED	(1UL << 0)
#define RWSEM_RD_NONSPINNABLE	(1UL << 1)
#define RWSEM_WR_NONSPINNABLE	(1UL << 2)
#define RWSEM_NONSPINNABLE	(RWSEM_RD_NONSPINNABLE | RWSEM_WR_NONSPINNABLE)
#define RWSEM_OWNER_FLAGS_MASK	(RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)

#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem)	do {			\
	if (!debug_locks_silent &&				\
	    WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
		#c, atomic_long_read(&(sem)->count),		\
		atomic_long_read(&(sem)->owner), (long)current,	\
		list_empty(&(sem)->wait_list) ? "" : "not "))	\
			debug_locks_off();			\
	} while (0)
#else
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif

/*
 * On 64-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-62 - 55-bit reader count
 * Bit  63   - read fail bit
 *
 * On 32-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-30 - 23-bit reader count
 * Bit  31   - read fail bit
 *
 * It is not likely that the most significant bit (read fail bit) will ever
 * be set. This guard bit is still checked anyway in the down_read()
 * fastpath just in case we need to use up more of the reader bits for
 * other purposes in the future.
 *
 * atomic_long_fetch_add() is used to obtain reader lock, whereas
 * atomic_long_cmpxchg() will be used to obtain writer lock.
 *
 * There are three places where the lock handoff bit may be set or cleared.
 * 1) rwsem_mark_wake() for readers.
 * 2) rwsem_try_write_lock() for writers.
 * 3) Error path of rwsem_down_write_slowpath().
 *
 * For all the above cases, wait_lock will be held. A writer must also
 * be the first one in the wait_list to be eligible for setting the handoff
 * bit. So concurrent setting/clearing of handoff bit is not possible.
 */
#define RWSEM_WRITER_LOCKED	(1UL << 0)
#define RWSEM_FLAG_WAITERS	(1UL << 1)
#define RWSEM_FLAG_HANDOFF	(1UL << 2)
#define RWSEM_FLAG_READFAIL	(1UL << (BITS_PER_LONG - 1))

#define RWSEM_READER_SHIFT	8
#define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)
#define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))
#define RWSEM_WRITER_MASK	RWSEM_WRITER_LOCKED
#define RWSEM_LOCK_MASK		(RWSEM_WRITER_MASK|RWSEM_READER_MASK)
#define RWSEM_READ_FAILED_MASK	(RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
				 RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
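
/*
 * Illustrative decoding of a couple of count values built from the
 * definitions above: ((3 << RWSEM_READER_SHIFT) | RWSEM_FLAG_WAITERS)
 * == 0x302 means three readers hold the lock with at least one waiter
 * queued, while (RWSEM_WRITER_LOCKED | RWSEM_FLAG_WAITERS) == 0x3
 * means the lock is write-locked with waiters queued.
 */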

/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without lock. Read from owner, however,
 * may not need READ_ONCE() as long as the pointer value is only used
 * for comparison and isn't being dereferenced.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
	atomic_long_set(&sem->owner, (long)current);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
	atomic_long_set(&sem->owner, 0);
}

/*
 * Test the flags in the owner field.
 */
static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
{
	return atomic_long_read(&sem->owner) & flags;
}

/*
 * The task_struct pointer of the last owning reader will be left in
 * the owner field.
 *
 * Note that the owner value just indicates the task has owned the rwsem
 * previously, it may not be the real owner or one of the real owners
 * anymore when that field is examined, so take it with a grain of salt.
 *
 * The reader non-spinnable bit is preserved.
 */
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
		(atomic_long_read(&sem->owner) & RWSEM_RD_NONSPINNABLE);

	atomic_long_set(&sem->owner, val);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
	__rwsem_set_reader_owned(sem, current);
}

/*
 * Return true if the rwsem is owned by a reader.
 */
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
#ifdef CONFIG_DEBUG_RWSEMS
	/*
	 * Check the count to see if it is write-locked.
	 */
	long count = atomic_long_read(&sem->count);

	if (count & RWSEM_WRITER_MASK)
		return false;
#endif
	return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
}

#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
 * is a task pointer in the owner field of a reader-owned rwsem, it will
 * be the real owner or one of the real owners. The only exception is when
 * the unlock is done by up_read_non_owner().
 */
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
	unsigned long val = atomic_long_read(&sem->owner);

	while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
		if (atomic_long_try_cmpxchg(&sem->owner, &val,
					    val & RWSEM_OWNER_FLAGS_MASK))
			return;
	}
}
#else
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif
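
/*
 * Illustrative decoding of an owner value (the task_struct address is
 * made up for the example): with a reader task at 0xffff888100400000,
 * an owner value of 0xffff888100400003 means that task was the last
 * reader to acquire the lock (RWSEM_READER_OWNED, bit 0) and reader
 * optimistic spinning is disabled (RWSEM_RD_NONSPINNABLE, bit 1).
 * Masking with ~RWSEM_OWNER_FLAGS_MASK recovers the task pointer.
 */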

/*
 * Set the RWSEM_NONSPINNABLE bits if the RWSEM_READER_OWNED flag
 * remains set. Otherwise, the operation will be aborted.
 */
static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	do {
		if (!(owner & RWSEM_READER_OWNED))
			break;
		if (owner & RWSEM_NONSPINNABLE)
			break;
	} while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
					  owner | RWSEM_NONSPINNABLE));
}

static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
{
	long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);

	if (WARN_ON_ONCE(cnt < 0))
		rwsem_set_nonspinnable(sem);
	return !(cnt & RWSEM_READ_FAILED_MASK);
}

/*
 * Return just the real task structure pointer of the owner
 */
static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
{
	return (struct task_struct *)
		(atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
}

/*
 * Return the real task structure pointer of the owner and the embedded
 * flags in the owner. pflags must be non-NULL.
 */
static inline struct task_struct *
rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	*pflags = owner & RWSEM_OWNER_FLAGS_MASK;
	return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
}

/*
 * Guide to the rw_semaphore's count field.
 *
 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
 * by a writer.
 *
 * The lock is owned by readers when
 * (1) the RWSEM_WRITER_LOCKED isn't set in count,
 * (2) some of the reader bits are set in count, and
 * (3) the owner field has the RWSEM_READER_OWNED bit set.
 *
 * Having some reader bits set is not enough to guarantee a reader-owned
 * lock as the readers may be in the process of backing out from the count
 * and a writer has just released the lock. So another writer may steal
 * the lock immediately after that.
 */

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
	atomic_long_set(&sem->owner, 0L);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	osq_lock_init(&sem->osq);
#endif
}
EXPORT_SYMBOL(__init_rwsem);

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
	unsigned long timeout;
	unsigned long last_rowner;
};
#define rwsem_first_waiter(sem) \
	list_first_entry(&sem->wait_list, struct rwsem_waiter, list)

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

enum writer_wait_state {
	WRITER_NOT_FIRST,	/* Writer is not first in wait list */
	WRITER_FIRST,		/* Writer is first in wait list */
	WRITER_HANDOFF		/* Writer is first & handoff needed */
};

/*
 * The typical HZ value is either 250 or 1000. So set the minimum waiting
 * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
 * queue before initiating the handoff protocol.
 */
#define RWSEM_WAIT_TIMEOUT	DIV_ROUND_UP(HZ, 250)
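
/*
 * Worked out for common HZ values: HZ=1000 gives DIV_ROUND_UP(1000, 250)
 * = 4 jiffies = 4ms; HZ=250 gives 1 jiffy = 4ms; HZ=100 gives 1 jiffy
 * = 10ms, which is the "1 jiffy (if it is higher than 4ms)" case above.
 */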

/*
 * Magic number to batch-wakeup waiting readers, even when writers are
 * also present in the queue. This both limits the amount of work the
 * waking thread must do and also prevents any potential counter overflow,
 * however unlikely.
 */
#define MAX_READERS_WAKEUP	0x100

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
 *   have been set.
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wakeup the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 */
static void rwsem_mark_wake(struct rw_semaphore *sem,
			    enum rwsem_wake_type wake_type,
			    struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;
	struct list_head wlist;

	lockdep_assert_held(&sem->wait_lock);

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = rwsem_first_waiter(sem);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
			lockevent_inc(rwsem_wake_writer);
		}

		return;
	}

	/*
	 * No reader wakeup if there are too many of them already.
	 */
	if (unlikely(atomic_long_read(&sem->count) < 0))
		return;

	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		struct task_struct *owner;

		adjustment = RWSEM_READER_BIAS;
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
			/*
			 * When we've been waiting "too" long (for writers
			 * to give up the lock), request a HANDOFF to
			 * force the issue.
			 */
			if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
			    time_after(jiffies, waiter->timeout)) {
				adjustment -= RWSEM_FLAG_HANDOFF;
				lockevent_inc(rwsem_rlock_handoff);
			}

			atomic_long_add(-adjustment, &sem->count);
			return;
		}
		/*
		 * Set it to reader-owned to give spinners an early
		 * indication that readers now have the lock.
		 * The reader nonspinnable bit seen at slowpath entry of
		 * the reader is copied over.
		 */
		owner = waiter->task;
		if (waiter->last_rowner & RWSEM_RD_NONSPINNABLE) {
			owner = (void *)((unsigned long)owner | RWSEM_RD_NONSPINNABLE);
			lockevent_inc(rwsem_opt_norspin);
		}
		__rwsem_set_reader_owned(sem, owner);
	}

	/*
	 * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
	 * queue. We know that woken will be at least 1 as we accounted
	 * for above. Note we increment the 'active part' of the count by the
	 * number of readers before waking any processes up.
	 *
	 * This is an adaptation of the phase-fair R/W locks where at the
	 * reader phase (first waiter is a reader), all readers are eligible
	 * to acquire the lock at the same time irrespective of their order
	 * in the queue. The writers acquire the lock according to their
	 * order in the queue.
	 *
	 * We have to do wakeup in 2 passes to prevent the possibility that
	 * the reader count may be decremented before it is incremented. It
	 * is because the to-be-woken waiter may not have slept yet. So it
	 * may see waiter->task got cleared, finish its critical section and
	 * do an unlock before the reader count increment.
	 *
	 * 1) Collect the read-waiters in a separate list, count them and
	 *    fully increment the reader count in rwsem.
	 * 2) For each waiter in the new list, clear waiter->task and
	 *    put them into wake_q to be woken up later.
	 */
	INIT_LIST_HEAD(&wlist);
	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			continue;

		woken++;
		list_move_tail(&waiter->list, &wlist);

		/*
		 * Limit # of readers that can be woken up per wakeup call.
		 */
		if (woken >= MAX_READERS_WAKEUP)
			break;
	}

	adjustment = woken * RWSEM_READER_BIAS - adjustment;
	lockevent_cond_inc(rwsem_wake_reader, woken);
	if (list_empty(&sem->wait_list)) {
		/* hit end of list above */
		adjustment -= RWSEM_FLAG_WAITERS;
	}

	/*
	 * When we've woken a reader, we no longer need to force writers
	 * to give up the lock and we can clear HANDOFF.
	 */
	if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
		adjustment -= RWSEM_FLAG_HANDOFF;

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);

	/* 2nd pass */
	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
		struct task_struct *tsk;

		tsk = waiter->task;
		get_task_struct(tsk);

		/*
		 * Ensure calling get_task_struct() before setting the reader
		 * waiter to nil such that rwsem_down_read_slowpath() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);
		/*
		 * Ensure issuing the wakeup (either by us or someone else)
		 * after setting the reader waiter to nil.
		 */
		wake_q_add_safe(wake_q, tsk);
	}
}
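
/*
 * A minimal sketch of the calling convention rwsem_mark_wake() expects,
 * mirroring what rwsem_wake() further below actually does: mark wakeups
 * while holding wait_lock, then do the real wakeup after dropping it.
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	raw_spin_lock_irq(&sem->wait_lock);
 *	if (!list_empty(&sem->wait_list))
 *		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 *	raw_spin_unlock_irq(&sem->wait_lock);
 *	wake_up_q(&wake_q);
 */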

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 *
 * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
 * bit is set or the lock is acquired with handoff bit cleared.
 */
static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
					enum writer_wait_state wstate)
{
	long count, new;

	lockdep_assert_held(&sem->wait_lock);

	count = atomic_long_read(&sem->count);
	do {
		bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);

		if (has_handoff && wstate == WRITER_NOT_FIRST)
			return false;

		new = count;

		if (count & RWSEM_LOCK_MASK) {
			if (has_handoff || (wstate != WRITER_HANDOFF))
				return false;

			new |= RWSEM_FLAG_HANDOFF;
		} else {
			new |= RWSEM_WRITER_LOCKED;
			new &= ~RWSEM_FLAG_HANDOFF;

			if (list_is_singular(&sem->wait_list))
				new &= ~RWSEM_FLAG_WAITERS;
		}
	} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));

	/*
	 * We have either acquired the lock with handoff bit cleared or
	 * set the handoff bit.
	 */
	if (new & RWSEM_FLAG_HANDOFF)
		return false;

	rwsem_set_owner(sem);
	return true;
}

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire read lock before the reader is put on wait queue.
 * Lock acquisition isn't allowed if the rwsem is locked or a writer handoff
 * is ongoing.
 */
static inline bool rwsem_try_read_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	if (count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))
		return false;

	count = atomic_long_fetch_add_acquire(RWSEM_READER_BIAS, &sem->count);
	if (!(count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
		rwsem_set_reader_owned(sem);
		lockevent_inc(rwsem_opt_rlock);
		return true;
	}

	/* Back out the change */
	atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
	return false;
}

/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
					count | RWSEM_WRITER_LOCKED)) {
			rwsem_set_owner(sem);
			lockevent_inc(rwsem_opt_wlock);
			return true;
		}
	}
	return false;
}

static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * Due to lock holder preemption, we skip spinning if the task is
	 * not on a CPU or its CPU is preempted.
	 */
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
					   unsigned long nonspinnable)
{
	struct task_struct *owner;
	unsigned long flags;
	bool ret = true;

	BUILD_BUG_ON(!(RWSEM_OWNER_UNKNOWN & RWSEM_NONSPINNABLE));

	if (need_resched()) {
		lockevent_inc(rwsem_opt_fail);
		return false;
	}

	preempt_disable();
	rcu_read_lock();
	owner = rwsem_owner_flags(sem, &flags);
	if ((flags & nonspinnable) || (owner && !owner_on_cpu(owner)))
		ret = false;
	rcu_read_unlock();
	preempt_enable();

	lockevent_cond_inc(rwsem_opt_fail, !ret);
	return ret;
}

/*
 * The rwsem_spin_on_owner() function returns the following 4 values
 * depending on the lock owner state.
 *   OWNER_NULL  : owner is currently NULL
 *   OWNER_WRITER: when owner changes and is a writer
 *   OWNER_READER: when owner changes and the new owner may be a reader.
 *   OWNER_NONSPINNABLE:
 *		   when optimistic spinning has to stop because either the
 *		   owner stops running, is unknown, or its timeslice has
 *		   been used up.
 */
enum owner_state {
	OWNER_NULL		= 1 << 0,
	OWNER_WRITER		= 1 << 1,
	OWNER_READER		= 1 << 2,
	OWNER_NONSPINNABLE	= 1 << 3,
};
#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER | OWNER_READER)

static inline enum owner_state
rwsem_owner_state(struct task_struct *owner, unsigned long flags, unsigned long nonspinnable)
{
	if (flags & nonspinnable)
		return OWNER_NONSPINNABLE;

	if (flags & RWSEM_READER_OWNED)
		return OWNER_READER;

	return owner ? OWNER_WRITER : OWNER_NULL;
}

static noinline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
{
	struct task_struct *new, *owner;
	unsigned long flags, new_flags;
	enum owner_state state;

	owner = rwsem_owner_flags(sem, &flags);
	state = rwsem_owner_state(owner, flags, nonspinnable);
	if (state != OWNER_WRITER)
		return state;

	rcu_read_lock();
	for (;;) {
		if (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF) {
			state = OWNER_NONSPINNABLE;
			break;
		}

		new = rwsem_owner_flags(sem, &new_flags);
		if ((new != owner) || (new_flags != flags)) {
			state = rwsem_owner_state(new, new_flags, nonspinnable);
			break;
		}

		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking sem->owner still matches owner. If that fails,
		 * owner might point to free()d memory; if it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		if (need_resched() || !owner_on_cpu(owner)) {
			state = OWNER_NONSPINNABLE;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return state;
}

/*
 * Calculate reader-owned rwsem spinning threshold for writer
 *
 * The more readers own the rwsem, the longer it will take for them to
 * wind down and free the rwsem. So the empirical formula used to
 * determine the actual spinning time limit here is:
 *
 *   Spinning threshold = (10 + nr_readers/2)us
 *
 * The limit is capped to a maximum of 25us (30 readers). This is just
 * a heuristic and is subject to change in the future.
 */
static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);
	int readers = count >> RWSEM_READER_SHIFT;
	u64 delta;

	if (readers > 30)
		readers = 30;
	delta = (20 + readers) * NSEC_PER_USEC / 2;

	return sched_clock() + delta;
}
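
/*
 * Worked example of the threshold above: with 10 readers holding the
 * lock, delta = (20 + 10) * NSEC_PER_USEC / 2 = 15000ns, i.e. a 15us
 * spinning window (10us base + 10/2 us); at 30 or more readers it
 * saturates at (20 + 30) / 2 = 25us.
 */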

static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
{
	bool taken = false;
	int prev_owner_state = OWNER_NULL;
	int loop = 0;
	u64 rspin_threshold = 0;
	unsigned long nonspinnable = wlock ? RWSEM_WR_NONSPINNABLE
					   : RWSEM_RD_NONSPINNABLE;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!osq_lock(&sem->osq))
		goto done;

	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock and spinning time has exceeded limit.
	 */
	for (;;) {
		enum owner_state owner_state;

		owner_state = rwsem_spin_on_owner(sem, nonspinnable);
		if (!(owner_state & OWNER_SPINNABLE))
			break;

		/*
		 * Try to acquire the lock
		 */
		taken = wlock ? rwsem_try_write_lock_unqueued(sem)
			      : rwsem_try_read_lock_unqueued(sem);

		if (taken)
			break;

		/*
		 * Time-based reader-owned rwsem optimistic spinning
		 */
		if (wlock && (owner_state == OWNER_READER)) {
			/*
			 * Re-initialize rspin_threshold every time when
			 * the owner state changes from non-reader to reader.
			 * This allows a writer to steal the lock in between
			 * 2 reader phases and have the threshold reset at
			 * the beginning of the 2nd reader phase.
			 */
			if (prev_owner_state != OWNER_READER) {
				if (rwsem_test_oflags(sem, nonspinnable))
					break;
				rspin_threshold = rwsem_rspin_threshold(sem);
				loop = 0;
			}

			/*
			 * Check time threshold once every 16 iterations to
			 * avoid calling sched_clock() too frequently so
			 * as to reduce the average latency between the times
			 * when the lock becomes free and when the spinner
			 * is ready to do a trylock.
			 */
			else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
				rwsem_set_nonspinnable(sem);
				lockevent_inc(rwsem_opt_nospin);
				break;
			}
		}

		/*
		 * An RT task cannot do optimistic spinning if it cannot
		 * be sure the lock holder is running or live-lock may
		 * happen if the current task and the lock holder happen
		 * to run on the same CPU. However, aborting optimistic
		 * spinning while a NULL owner is detected may miss some
		 * opportunity where spinning can continue without causing
		 * problem.
		 *
		 * There are 2 possible cases where an RT task may be able
		 * to continue spinning.
		 *
		 * 1) The lock owner is in the process of releasing the
		 *    lock, sem->owner is cleared but the lock has not
		 *    been released yet.
		 * 2) The lock was free and owner cleared, but another
		 *    task just comes in and acquires the lock before
		 *    we try to get it. The new owner may be a spinnable
		 *    writer.
		 *
		 * To take advantage of the two scenarios listed above, the
		 * RT task is made to retry one more time to see if it can
		 * acquire the lock or continue spinning on the new owning
		 * writer. Of course, if the time lag is long enough or the
		 * new owner is not a writer or spinnable, the RT task will
		 * quit spinning.
		 *
		 * If the owner is a writer, the need_resched() check is
		 * done inside rwsem_spin_on_owner(). If the owner is not
		 * a writer, the need_resched() check needs to be done here.
		 */
		if (owner_state != OWNER_WRITER) {
			if (need_resched())
				break;
			if (rt_task(current) &&
			   (prev_owner_state != OWNER_WRITER))
				break;
		}
		prev_owner_state = owner_state;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	lockevent_cond_inc(rwsem_opt_fail, !taken);
	return taken;
}

/*
 * Clear the owner's RWSEM_WR_NONSPINNABLE bit if it is set. This should
 * only be called when the reader count reaches 0.
 *
 * This gives writers a better chance to acquire the rwsem first before
 * readers when the rwsem was being held by readers for a relatively long
 * period of time. A race can happen where an optimistic spinner may have
 * just stolen the rwsem and set the owner, but just clearing the
 * RWSEM_WR_NONSPINNABLE bit will do no harm anyway.
 */
static inline void clear_wr_nonspinnable(struct rw_semaphore *sem)
{
	if (rwsem_test_oflags(sem, RWSEM_WR_NONSPINNABLE))
		atomic_long_andnot(RWSEM_WR_NONSPINNABLE, &sem->owner);
}

/*
 * This function is called when the reader fails to acquire the lock via
 * optimistic spinning. In this case we will still attempt a trylock when
 * a comparison of the current rwsem state with the state at slowpath
 * entry indicates that the reader is still in a valid reader phase.
 * This happens when the following conditions are true:
 *
 * 1) The lock is currently reader owned, and
 * 2) The lock is previously not reader-owned or the last read owner changes.
 *
 * In the former case, we have transitioned from a writer phase to a
 * reader phase while spinning. In the latter case, it means the reader
 * phase hasn't ended when we entered the optimistic spinning loop. In
 * both cases, the reader is eligible to acquire the lock. This is the
 * secondary path where a read lock is acquired optimistically.
 *
 * The reader non-spinnable bit wasn't set at time of entry or it will
 * not be here at all.
 */
static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
					      unsigned long last_rowner)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	if (!(owner & RWSEM_READER_OWNED))
		return false;

	if (((owner ^ last_rowner) & ~RWSEM_OWNER_FLAGS_MASK) &&
	    rwsem_try_read_lock_unqueued(sem)) {
		lockevent_inc(rwsem_opt_rlock2);
		lockevent_add(rwsem_opt_fail, -1);
		return true;
	}
	return false;
}
#else
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
					   unsigned long nonspinnable)
{
	return false;
}

static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
{
	return false;
}

static inline void clear_wr_nonspinnable(struct rw_semaphore *sem) { }

static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
					      unsigned long last_rowner)
{
	return false;
}
#endif
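
/*
 * Illustration of the owner check in rwsem_reader_phase_trylock() above:
 * (owner ^ last_rowner) & ~RWSEM_OWNER_FLAGS_MASK is non-zero iff the
 * task-pointer part of the owner field changed. Since last_rowner was
 * reduced to its flag bits when the lock wasn't reader-owned at entry,
 * this covers both qualifying conditions: the lock was previously not
 * reader-owned, or a different reader has since acquired it.
 */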

/*
 * Wait for the read lock to be granted
 */
static struct rw_semaphore __sched *
rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
{
	long count, adjustment = -RWSEM_READER_BIAS;
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);
	bool wake = false;

	/*
	 * Save the current read-owner of rwsem, if available, and the
	 * reader nonspinnable bit.
	 */
	waiter.last_rowner = atomic_long_read(&sem->owner);
	if (!(waiter.last_rowner & RWSEM_READER_OWNED))
		waiter.last_rowner &= RWSEM_RD_NONSPINNABLE;

	if (!rwsem_can_spin_on_owner(sem, RWSEM_RD_NONSPINNABLE))
		goto queue;

	/*
	 * Undo read bias from down_read() and do optimistic spinning.
	 */
	atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
	adjustment = 0;
	if (rwsem_optimistic_spin(sem, false)) {
		/*
		 * Wake up other readers in the wait list if the front
		 * waiter is a reader.
		 */
		if ((atomic_long_read(&sem->count) & RWSEM_FLAG_WAITERS)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (!list_empty(&sem->wait_list))
				rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
						&wake_q);
			raw_spin_unlock_irq(&sem->wait_lock);
			wake_up_q(&wake_q);
		}
		return sem;
	} else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
		return sem;
	}

queue:
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;
	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list)) {
		/*
		 * In case the wait queue is empty and the lock isn't owned
		 * by a writer or has the handoff bit set, this reader can
		 * exit the slowpath and return immediately as its
		 * RWSEM_READER_BIAS has already been set in the count.
		 */
		if (adjustment && !(atomic_long_read(&sem->count) &
		     (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
			raw_spin_unlock_irq(&sem->wait_lock);
			rwsem_set_reader_owned(sem);
			lockevent_inc(rwsem_rlock_fast);
			return sem;
		}
		adjustment += RWSEM_FLAG_WAITERS;
	}
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (adjustment)
		count = atomic_long_add_return(adjustment, &sem->count);
	else
		count = atomic_long_read(&sem->count);

	/*
	 * If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers!
	 */
	if (!(count & RWSEM_LOCK_MASK)) {
		clear_wr_nonspinnable(sem);
		wake = true;
	}
	if (wake || (!(count & RWSEM_WRITER_MASK) &&
		    (adjustment & RWSEM_FLAG_WAITERS)))
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	while (true) {
		set_current_state(state);
		if (!waiter.task)
			break;
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			break;
		}
		schedule();
		lockevent_inc(rwsem_sleep_reader);
	}

	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock);
	return sem;
out_nolock:
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list)) {
		atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
				   &sem->count);
	}
	raw_spin_unlock_irq(&sem->wait_lock);
	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock_fail);
	return ERR_PTR(-EINTR);
}

/*
 * This function is called by a write lock owner. So the owner value
 * won't get changed by others.
 */
static inline void rwsem_disable_reader_optspin(struct rw_semaphore *sem,
						bool disable)
{
	if (unlikely(disable)) {
		atomic_long_or(RWSEM_RD_NONSPINNABLE, &sem->owner);
		lockevent_inc(rwsem_opt_norspin);
	}
}

/*
 * Wait until we successfully acquire the write lock
 */
static struct rw_semaphore *
rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
{
	long count;
	bool disable_rspin;
	enum writer_wait_state wstate;
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	DEFINE_WAKE_Q(wake_q);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
	    rwsem_optimistic_spin(sem, true))
		return sem;

	/*
	 * Disable reader optimistic spinning for this rwsem after
	 * acquiring the write lock when the setting of the nonspinnable
	 * bits is observed.
	 */
	disable_rspin = atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;
	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock */
	if (wstate == WRITER_NOT_FIRST) {
		count = atomic_long_read(&sem->count);

		/*
		 * If there were already threads queued before us and:
		 *  1) there are no active locks, wake the front
		 *     queued process(es) as the handoff bit might be set.
		 *  2) there are no active writers and some readers, the lock
		 *     must be read owned; so we try to wake any read lock
		 *     waiters that were queued ahead of us.
		 */
		if (count & RWSEM_WRITER_MASK)
			goto wait;

		rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
					? RWSEM_WAKE_READERS
					: RWSEM_WAKE_ANY, &wake_q);

		if (!wake_q_empty(&wake_q)) {
			/*
			 * We want to minimize wait_lock hold time especially
			 * when a large number of readers are to be woken up.
			 */
			raw_spin_unlock_irq(&sem->wait_lock);
			wake_up_q(&wake_q);
			wake_q_init(&wake_q);	/* Used again, reinit */
			raw_spin_lock_irq(&sem->wait_lock);
		}
	} else {
		atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
	}

wait:
	/* wait until we successfully acquire the lock */
	set_current_state(state);
	while (true) {
		if (rwsem_try_write_lock(sem, wstate))
			break;

		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		for (;;) {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			lockevent_inc(rwsem_sleep_writer);
			set_current_state(state);
			/*
			 * If HANDOFF bit is set, unconditionally do
			 * a trylock.
			 */
			if (wstate == WRITER_HANDOFF)
				break;

			if ((wstate == WRITER_NOT_FIRST) &&
			    (rwsem_first_waiter(sem) == &waiter))
				wstate = WRITER_FIRST;

			count = atomic_long_read(&sem->count);
			if (!(count & RWSEM_LOCK_MASK))
				break;

			/*
			 * The setting of the handoff bit is deferred
			 * until rwsem_try_write_lock() is called.
			 */
			if ((wstate == WRITER_FIRST) && (rt_task(current) ||
			    time_after(jiffies, waiter.timeout))) {
				wstate = WRITER_HANDOFF;
				lockevent_inc(rwsem_wlock_handoff);
				break;
			}
		}

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	rwsem_disable_reader_optspin(sem, disable_rspin);
	raw_spin_unlock_irq(&sem->wait_lock);
	lockevent_inc(rwsem_wlock);

	return ret;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);

	if (unlikely(wstate == WRITER_HANDOFF))
		atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count);

	if (list_empty(&sem->wait_list))
		atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
	else
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);
	lockevent_inc(rwsem_wlock_fail);

	return ERR_PTR(-EINTR);
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem, long count)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}

/*
 * lock for reading
 */
inline void __down_read(struct rw_semaphore *sem)
{
	if (!rwsem_read_trylock(sem)) {
		rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	} else {
		rwsem_set_reader_owned(sem);
	}
}

static inline int __down_read_killable(struct rw_semaphore *sem)
{
	if (!rwsem_read_trylock(sem)) {
		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
			return -EINTR;
		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	} else {
		rwsem_set_reader_owned(sem);
	}
	return 0;
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	/*
	 * Optimize for the case when the rwsem is not locked at all.
	 */
	long tmp = RWSEM_UNLOCKED_VALUE;

	do {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
					tmp + RWSEM_READER_BIAS)) {
			rwsem_set_reader_owned(sem);
			return 1;
		}
	} while (!(tmp & RWSEM_READ_FAILED_MASK));
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;

	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
						      RWSEM_WRITER_LOCKED)))
		rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
	else
		rwsem_set_owner(sem);
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;

	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
						      RWSEM_WRITER_LOCKED))) {
		if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
			return -EINTR;
	} else {
		rwsem_set_owner(sem);
	}
	return 0;
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;

	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
					    RWSEM_WRITER_LOCKED)) {
		rwsem_set_owner(sem);
		return true;
	}
	return false;
}

/*
 * unlock after reading
 */
inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	rwsem_clear_reader_owned(sem);
	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
	DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
	if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
		      RWSEM_FLAG_WAITERS)) {
		clear_wr_nonspinnable(sem);
		rwsem_wake(sem, tmp);
	}
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * sem->owner may differ from current if the ownership is transferred
	 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
	 */
	DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
			    !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
	rwsem_clear_owner(sem);
	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
		rwsem_wake(sem, tmp);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
	tmp = atomic_long_fetch_add_release(
		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
	rwsem_set_reader_owned(sem);
	if (tmp & RWSEM_FLAG_WAITERS)
		rwsem_downgrade_wake(sem);
}

/*
 * lock for reading
 */
void __sched down_read(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read);

int __sched down_read_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
		rwsem_release(&sem->dep_map, 1, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_killable);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int down_read_trylock(struct rw_semaphore *sem)
{
	int ret = __down_read_trylock(sem);

	if (ret == 1)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(down_read_trylock);

/*
 * lock for writing
 */
void __sched down_write(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write);

/*
 * lock for writing
 */
int __sched down_write_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, 1, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int down_write_trylock(struct rw_semaphore *sem)
{
	int ret = __down_write_trylock(sem);

	if (ret == 1)
		rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(down_write_trylock);

/*
 * release a read lock
 */
void up_read(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, 1, _RET_IP_);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read);

/*
 * release a write lock
 */
void up_write(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, 1, _RET_IP_);
	__up_write(sem);
}
EXPORT_SYMBOL(up_write);

/*
 * downgrade write lock to read lock
 */
void downgrade_write(struct rw_semaphore *sem)
{
	lock_downgrade(&sem->dep_map, _RET_IP_);
	__downgrade_write(sem);
}
EXPORT_SYMBOL(downgrade_write);

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void down_read_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read_nested);

void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
{
	might_sleep();
	rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(_down_write_nest_lock);
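
/*
 * A minimal usage sketch of the lockdep-nested variant above; the
 * struct and function names are made up for illustration, while
 * down_read_nested() and SINGLE_DEPTH_NESTING are the real API. It
 * assumes callers guarantee src != dst and a stable locking order
 * between the two objects so that no ABBA deadlock can form. The
 * subclass argument tells lockdep that taking a second lock of the
 * same class is intentional:
 *
 *	struct foo {
 *		struct rw_semaphore lock;
 *	};
 *
 *	static void foo_copy(struct foo *dst, struct foo *src)
 *	{
 *		down_read(&src->lock);
 *		down_read_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *		... copy src state into dst ...
 *		up_read(&dst->lock);
 *		up_read(&src->lock);
 *	}
 */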

void down_read_non_owner(struct rw_semaphore *sem)
{
	might_sleep();
	__down_read(sem);
	__rwsem_set_reader_owned(sem, NULL);
}
EXPORT_SYMBOL(down_read_non_owner);

void down_write_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write_nested);

int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, 1, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable_nested);

void up_read_non_owner(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read_non_owner);

#endif
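
/*
 * A minimal, illustrative sketch of the core API exported above. The
 * "example_*" names are made up; the rwsem calls themselves are the
 * real public interface.
 */
#if 0	/* Usage example only, not built. */
static DECLARE_RWSEM(example_lock);
static int example_data;

static int example_reader(void)
{
	int val;

	down_read(&example_lock);	/* shared: many readers may enter */
	val = example_data;
	up_read(&example_lock);
	return val;
}

static void example_writer(int val)
{
	down_write(&example_lock);	/* exclusive: excludes readers and writers */
	example_data = val;
	downgrade_write(&example_lock);	/* keep it held, now for read */
	/* other readers can now also enter */
	up_read(&example_lock);
}
#endif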