Lines Matching +full:use +full:- +full:rtm

1 // SPDX-License-Identifier: GPL-2.0
5 * Derived from asm-i386/semaphore.h
7 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
13 * Rwsem count bit fields re-definition and rwsem rearchitecture by
38 * - Bit 0: RWSEM_READER_OWNED - rwsem may be owned by readers (just a hint)
39 * - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
41 * When the rwsem is reader-owned and a spinning writer has timed out,
50 * for a free or reader-owned rwsem, the owner value may contain
60 * - rwsem is not currently writer owned
61 * - the handoff isn't set.
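The owner-field comment above says the word holds a task_struct pointer with two low hint bits folded into it (RWSEM_READER_OWNED in bit 0, RWSEM_NONSPINNABLE in bit 1). As a rough user-space illustration of that encoding -- struct fake_task, owner_pack() and owner_unpack() are made-up names, and the flag values are assumed from the bit positions listed above -- the packing and unpacking amounts to:

#include <stdint.h>
#include <stdio.h>

/* Assumed values, mirroring the bit positions described above. */
#define RWSEM_READER_OWNED	(1UL << 0)	/* may be owned by readers (hint only) */
#define RWSEM_NONSPINNABLE	(1UL << 1)	/* do not optimistically spin on this owner */
#define RWSEM_OWNER_FLAGS_MASK	(RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)

struct fake_task { const char *comm; };

/* Pack a task pointer and the hint flags into one owner word. */
static uintptr_t owner_pack(const struct fake_task *tsk, uintptr_t flags)
{
	return (uintptr_t)tsk | (flags & RWSEM_OWNER_FLAGS_MASK);
}

/* Split an owner word back into its task pointer and flag bits. */
static const struct fake_task *owner_unpack(uintptr_t owner, uintptr_t *flags)
{
	*flags = owner & RWSEM_OWNER_FLAGS_MASK;
	return (const struct fake_task *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
}

int main(void)
{
	struct fake_task reader = { "reader-task" };
	uintptr_t flags;
	uintptr_t owner = owner_pack(&reader, RWSEM_READER_OWNED);
	const struct fake_task *tsk = owner_unpack(owner, &flags);

	printf("owner=%s reader_owned=%lu nonspinnable=%lu\n", tsk->comm,
	       (unsigned long)(flags & RWSEM_READER_OWNED),
	       (unsigned long)(flags & RWSEM_NONSPINNABLE) >> 1);
	return 0;
}

The trick works because task_struct pointers are aligned far beyond 4 bytes, so bits 0 and 1 of the pointer are always free to carry the hints.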
71 #c, atomic_long_read(&(sem)->count), \
72 (unsigned long) sem->magic, \
73 atomic_long_read(&(sem)->owner), (long)current, \
74 list_empty(&(sem)->wait_list) ? "" : "not ")) \
82 * On 64-bit architectures, the bit definitions of the count are:
84 * Bit 0 - writer locked bit
85 * Bit 1 - waiters present bit
86 * Bit 2 - lock handoff bit
87 * Bits 3-7 - reserved
88 * Bits 8-62 - 55-bit reader count
89 * Bit 63 - read fail bit
91 * On 32-bit architectures, the bit definitions of the count are:
93 * Bit 0 - writer locked bit
94 * Bit 1 - waiters present bit
95 * Bit 2 - lock handoff bit
96 * Bits 3-7 - reserved
97 * Bits 8-30 - 23-bit reader count
98 * Bit 31 - read fail bit
102 * just in case we need to use up more of the reader bits for other purposes
109 * 1) rwsem_mark_wake() for readers -- set, clear
110 * 2) rwsem_try_write_lock() for writers -- set, clear
111 * 3) rwsem_del_waiter() -- clear
120 #define RWSEM_FLAG_READFAIL (1UL << (BITS_PER_LONG - 1))
124 #define RWSEM_READER_MASK (~(RWSEM_READER_BIAS - 1))
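Only RWSEM_FLAG_READFAIL and RWSEM_READER_MASK of the count masks appear in this listing; the sketch below fills in the remaining constants as assumptions that follow directly from the bit layout documented above, and decodes a sample count value in user space (decode_count() is an illustrative helper, not a kernel function):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG		(8 * (int)sizeof(long))

/* Assumed to match the layout documented above. */
#define RWSEM_WRITER_LOCKED	(1UL << 0)
#define RWSEM_FLAG_WAITERS	(1UL << 1)
#define RWSEM_FLAG_HANDOFF	(1UL << 2)
#define RWSEM_READER_SHIFT	8
#define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)
#define RWSEM_FLAG_READFAIL	(1UL << (BITS_PER_LONG - 1))
#define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))

static void decode_count(unsigned long count)
{
	printf("writer=%lu waiters=%lu handoff=%lu readers=%lu readfail=%lu\n",
	       count & RWSEM_WRITER_LOCKED,
	       (count & RWSEM_FLAG_WAITERS) >> 1,
	       (count & RWSEM_FLAG_HANDOFF) >> 2,
	       (count & RWSEM_READER_MASK & ~RWSEM_FLAG_READFAIL) >> RWSEM_READER_SHIFT,
	       (count & RWSEM_FLAG_READFAIL) >> (BITS_PER_LONG - 1));
}

int main(void)
{
	/* Two readers holding the lock while another task waits. */
	decode_count(2 * RWSEM_READER_BIAS + RWSEM_FLAG_WAITERS);
	return 0;
}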
132 * store tearing can't happen as optimistic spinners may read and use
138 * preempt disable section as the atomic op that changes sem->count.
143 atomic_long_set(&sem->owner, (long)current); in rwsem_set_owner()
149 atomic_long_set(&sem->owner, 0); in rwsem_clear_owner()
157 return atomic_long_read(&sem->owner) & flags; in rwsem_test_oflags()
168 * The reader non-spinnable bit is preserved.
174 (atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE); in __rwsem_set_reader_owned()
176 atomic_long_set(&sem->owner, val); in __rwsem_set_reader_owned()
191 (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK); in rwsem_owner()
200 * Check the count to see if it is write-locked. in is_rwsem_reader_owned()
202 long count = atomic_long_read(&sem->count); in is_rwsem_reader_owned()
211 * is a task pointer in the owner field of a reader-owned rwsem, it will be the
217 unsigned long val = atomic_long_read(&sem->owner); in rwsem_clear_reader_owned()
220 if (atomic_long_try_cmpxchg(&sem->owner, &val, in rwsem_clear_reader_owned()
237 unsigned long owner = atomic_long_read(&sem->owner); in rwsem_set_nonspinnable()
244 } while (!atomic_long_try_cmpxchg(&sem->owner, &owner, in rwsem_set_nonspinnable()
250 *cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count); in rwsem_read_trylock()
267 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) { in rwsem_write_trylock()
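rwsem_write_trylock() above attempts a single compare-and-exchange from the fully unlocked count to RWSEM_WRITER_LOCKED, so any reader bias or flag bit makes it fail. A user-space model of that fast path using C11 atomics -- fake_rwsem and write_trylock() are illustrative names, and the real kernel routine additionally records current as the owner on success:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE	0L		/* assumed: no writer, no readers, no flags */
#define RWSEM_WRITER_LOCKED	(1L << 0)

struct fake_rwsem { atomic_long count; };

/* Succeeds only when the count is exactly "unlocked": any reader bias,
 * waiter bit or writer bit makes the compare-exchange fail. */
static bool write_trylock(struct fake_rwsem *sem)
{
	long expected = RWSEM_UNLOCKED_VALUE;

	return atomic_compare_exchange_strong_explicit(&sem->count, &expected,
						       RWSEM_WRITER_LOCKED,
						       memory_order_acquire,
						       memory_order_relaxed);
}

int main(void)
{
	struct fake_rwsem sem = { .count = RWSEM_UNLOCKED_VALUE };

	printf("first trylock:  %d\n", write_trylock(&sem));	/* 1: got the lock */
	printf("second trylock: %d\n", write_trylock(&sem));	/* 0: already writer-locked */
	return 0;
}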
277 * flags in the owner. pflags must be non-NULL.
282 unsigned long owner = atomic_long_read(&sem->owner); in rwsem_owner_flags()
316 lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP); in __init_rwsem()
319 sem->magic = sem; in __init_rwsem()
321 atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE); in __init_rwsem()
322 raw_spin_lock_init(&sem->wait_lock); in __init_rwsem()
323 INIT_LIST_HEAD(&sem->wait_list); in __init_rwsem()
324 atomic_long_set(&sem->owner, 0L); in __init_rwsem()
326 osq_lock_init(&sem->osq); in __init_rwsem()
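The __init_rwsem() fragments above show the starting state of a semaphore: unlocked count, cleared owner, empty wait list and an initialised optimistic-spin queue. Callers normally reach it through init_rwsem() or the static DECLARE_RWSEM() initialiser; a small, illustrative module-style use (example_dev, its functions and example_global_rwsem are made up for this sketch):

#include <linux/rwsem.h>

/* Statically initialised rwsem: DECLARE_RWSEM() sets up the same unlocked
 * count / empty wait list state that __init_rwsem() produces at run time. */
static DECLARE_RWSEM(example_global_rwsem);

struct example_dev {
	struct rw_semaphore cfg_rwsem;	/* dynamically initialised per device */
	int cfg_value;
};

static void example_dev_setup(struct example_dev *dev)
{
	init_rwsem(&dev->cfg_rwsem);	/* wraps __init_rwsem() with a lockdep key */
	dev->cfg_value = 0;
}

static int example_dev_read_cfg(struct example_dev *dev)
{
	int val;

	/* Global lock first, then the per-device lock. */
	down_read(&example_global_rwsem);
	down_read(&dev->cfg_rwsem);
	val = dev->cfg_value;
	up_read(&dev->cfg_rwsem);
	up_read(&example_global_rwsem);
	return val;
}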
344 list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
360 * Magic number to batch-wakeup waiting readers, even when writers are
370 lockdep_assert_held(&sem->wait_lock); in rwsem_add_waiter()
371 list_add_tail(&waiter->list, &sem->wait_list); in rwsem_add_waiter()
386 lockdep_assert_held(&sem->wait_lock); in rwsem_del_waiter()
387 list_del(&waiter->list); in rwsem_del_waiter()
388 if (likely(!list_empty(&sem->wait_list))) in rwsem_del_waiter()
391 atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count); in rwsem_del_waiter()
397 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
399 * - there must be someone on the queue
400 * - the wait_lock must be held by the caller
401 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
404 * - woken waiter blocks are removed from the list after their task pointer is zeroed
405 * - writers are only marked woken if downgrading is false
417 lockdep_assert_held(&sem->wait_lock); in rwsem_mark_wake()
425 if (waiter->type == RWSEM_WAITING_FOR_WRITE) { in rwsem_mark_wake()
434 wake_q_add(wake_q, waiter->task); in rwsem_mark_wake()
444 if (unlikely(atomic_long_read(&sem->count) < 0)) in rwsem_mark_wake()
456 oldcount = atomic_long_fetch_add(adjustment, &sem->count); in rwsem_mark_wake()
463 if (time_after(jiffies, waiter->timeout)) { in rwsem_mark_wake()
465 adjustment -= RWSEM_FLAG_HANDOFF; in rwsem_mark_wake()
468 waiter->handoff_set = true; in rwsem_mark_wake()
471 atomic_long_add(-adjustment, &sem->count); in rwsem_mark_wake()
475 * Set it to reader-owned to give spinners an early in rwsem_mark_wake()
480 owner = waiter->task; in rwsem_mark_wake()
490 * This is an adaptation of the phase-fair R/W locks where at the in rwsem_mark_wake()
498 * is because the to-be-woken waiter may not have slept yet. So it in rwsem_mark_wake()
499 * may see waiter->task got cleared, finish its critical section and in rwsem_mark_wake()
502 * 1) Collect the read-waiters in a separate list, count them and in rwsem_mark_wake()
504 * 2) For each waiter in the new list, clear waiter->task and in rwsem_mark_wake()
508 list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) { in rwsem_mark_wake()
509 if (waiter->type == RWSEM_WAITING_FOR_WRITE) in rwsem_mark_wake()
513 list_move_tail(&waiter->list, &wlist); in rwsem_mark_wake()
522 adjustment = woken * RWSEM_READER_BIAS - adjustment; in rwsem_mark_wake()
525 oldcount = atomic_long_read(&sem->count); in rwsem_mark_wake()
526 if (list_empty(&sem->wait_list)) { in rwsem_mark_wake()
531 adjustment -= RWSEM_FLAG_WAITERS; in rwsem_mark_wake()
533 adjustment -= RWSEM_FLAG_HANDOFF; in rwsem_mark_wake()
540 adjustment -= RWSEM_FLAG_HANDOFF; in rwsem_mark_wake()
544 atomic_long_add(adjustment, &sem->count); in rwsem_mark_wake()
550 tsk = waiter->task; in rwsem_mark_wake()
559 smp_store_release(&waiter->task, NULL); in rwsem_mark_wake()
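The phase-fair comment above is why readers are woken in two passes: first they are moved to a private list while their reader bias is accounted for, then waiter->task is cleared with a release store and the task is queued for wakeup, so a waiter that races ahead and frees its on-stack block is never touched again. A condensed sketch of those two passes, meant to be read in the context of kernel/locking/rwsem.c (wake_readers_sketch() is not a real function; the real rwsem_mark_wake() folds the count/flag adjustments above into the same walk and caps the batch at MAX_READERS_WAKEUP):

/* Condensed sketch of the two-pass reader wakeup described above. */
static void wake_readers_sketch(struct rw_semaphore *sem,
				struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	LIST_HEAD(wlist);

	/* Pass 1: peel the waiting readers off the wait list. */
	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			continue;
		list_move_tail(&waiter->list, &wlist);
	}

	/* Pass 2: hand each reader to the wake queue.  The release store of
	 * NULL to waiter->task is the point after which the woken task may
	 * free its on-stack rwsem_waiter, so it is the last touch. */
	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
		struct task_struct *tsk = waiter->task;

		get_task_struct(tsk);
		smp_store_release(&waiter->task, NULL);
		wake_q_add_safe(wake_q, tsk);
	}
}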
577 __releases(&sem->wait_lock) in rwsem_del_wake_waiter()
590 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_del_wake_waiter()
596 * This function must be called with the sem->wait_lock held to prevent
598 * sem->count accordingly.
608 lockdep_assert_held(&sem->wait_lock); in rwsem_try_write_lock()
610 count = atomic_long_read(&sem->count); in rwsem_try_write_lock()
620 if (first->handoff_set && (waiter != first)) in rwsem_try_write_lock()
632 if (has_handoff || (!rt_or_dl_task(waiter->task) && in rwsem_try_write_lock()
633 !time_after(jiffies, waiter->timeout))) in rwsem_try_write_lock()
641 if (list_is_singular(&sem->wait_list)) in rwsem_try_write_lock()
644 } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new)); in rwsem_try_write_lock()
652 first->handoff_set = true; in rwsem_try_write_lock()
661 list_del(&waiter->list); in rwsem_try_write_lock()
690 long count = atomic_long_read(&sem->count); in rwsem_try_write_lock_unqueued()
693 if (atomic_long_try_cmpxchg_acquire(&sem->count, &count, in rwsem_try_write_lock_unqueued()
715 * Disabling preemption is equivalent to an RCU read-side critical section, in rwsem_can_spin_on_owner()
720 * Don't check the read-owner as the entry may be stale. in rwsem_can_spin_on_owner()
772 * Ensure we emit the owner->on_cpu dereference _after_ in rwsem_spin_on_owner()
773 * checking sem->owner still matches owner, if that fails, in rwsem_spin_on_owner()
776 * equivalent to an RCU read-side critical section ensures the memory in rwsem_spin_on_owner()
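The spinning fragments above depend on two guarantees: the owner pointer is only read and dereferenced while preemption is disabled (which the code treats as equivalent to an RCU read-side critical section, so the task_struct cannot be freed underneath the spinner), and sem->owner is re-checked before each owner->on_cpu access. A condensed sketch of that pattern follows; spin_while_owner_running() is an illustrative name, and the real rwsem_spin_on_owner() returns an OWNER_* state and runs with preemption already disabled by rwsem_optimistic_spin().

/* Sketch: spin as long as the current write owner is running on a CPU.
 * Meant to be read in the context of kernel/locking/rwsem.c. */
static bool spin_while_owner_running(struct rw_semaphore *sem)
{
	struct task_struct *owner, *new_owner;
	unsigned long flags, new_flags;
	bool owner_gone;

	/*
	 * Disabling preemption doubles as an RCU read-side section here:
	 * task_structs are freed via RCU, so a pointer observed in this
	 * window stays dereferenceable until preemption is re-enabled.
	 */
	preempt_disable();

	owner = rwsem_owner_flags(sem, &flags);
	if (!owner || (flags & RWSEM_NONSPINNABLE)) {
		preempt_enable();
		return false;
	}

	for (;;) {
		/* Re-check sem->owner *before* trusting the old pointer. */
		new_owner = rwsem_owner_flags(sem, &new_flags);
		if (new_owner != owner || new_flags != flags)
			break;

		/* Order the re-check before the owner->on_cpu load. */
		barrier();

		if (!READ_ONCE(owner->on_cpu) || need_resched())
			break;

		cpu_relax();
	}

	owner_gone = !rwsem_owner_flags(sem, &new_flags);
	preempt_enable();

	/* Another trylock attempt is only worthwhile once the owner left. */
	return owner_gone;
}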
793 * Calculate reader-owned rwsem spinning threshold for writer
806 long count = atomic_long_read(&sem->count); in rwsem_rspin_threshold()
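rwsem_rspin_threshold() converts the reader count packed into sem->count into a time budget for a writer that keeps spinning on a reader-owned lock. The constants are not visible in this listing, so the sketch below only models the assumed shape of the heuristic -- a small fixed budget plus a capped per-reader increment -- in user space:

#include <stdint.h>
#include <stdio.h>

#define RWSEM_READER_SHIFT	8
#define NSEC_PER_USEC		1000ULL

/* Assumed shape of the heuristic: a fixed base plus a per-reader increment,
 * with the reader count capped so a huge reader population cannot stretch
 * the spin indefinitely.  The exact constants in the kernel may differ. */
static uint64_t rspin_threshold_sketch(long count, uint64_t now_ns)
{
	long readers = count >> RWSEM_READER_SHIFT;

	if (readers > 30)
		readers = 30;

	return now_ns + (20 + readers) * NSEC_PER_USEC;
}

int main(void)
{
	uint64_t now = 1000000;		/* pretend "current time" in ns */

	/* 4 readers hold the lock: spin budget of roughly 24us. */
	printf("deadline = %llu ns\n",
	       (unsigned long long)rspin_threshold_sketch(4L << RWSEM_READER_SHIFT, now));
	return 0;
}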
824 /* sem->wait_lock should not be held when doing optimistic spinning */ in rwsem_optimistic_spin()
825 if (!osq_lock(&sem->osq)) in rwsem_optimistic_spin()
850 * Time-based reader-owned rwsem optimistic spinning in rwsem_optimistic_spin()
854 * Re-initialize rspin_threshold whenever in rwsem_optimistic_spin()
855 * the owner state changes from non-reader to reader. in rwsem_optimistic_spin()
883 * be sure the lock holder is running or live-lock may in rwsem_optimistic_spin()
894 * lock, sem->owner is cleared but the lock has not in rwsem_optimistic_spin()
923 * everything in this loop to be re-loaded. We don't need in rwsem_optimistic_spin()
929 osq_unlock(&sem->osq); in rwsem_optimistic_spin()
942 atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner); in clear_nonspinnable()
968 * reader-owned, wake up the read lock waiters at the front of the queue or wake up any
996 long adjustment = -RWSEM_READER_BIAS; in rwsem_down_read_slowpath()
1006 if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) && in rwsem_down_read_slowpath()
1022 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_read_slowpath()
1023 if (!list_empty(&sem->wait_list)) in rwsem_down_read_slowpath()
1026 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_read_slowpath()
1038 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_read_slowpath()
1039 if (list_empty(&sem->wait_list)) { in rwsem_down_read_slowpath()
1046 if (!(atomic_long_read(&sem->count) & RWSEM_WRITER_MASK)) { in rwsem_down_read_slowpath()
1049 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_read_slowpath()
1059 count = atomic_long_add_return(adjustment, &sem->count); in rwsem_down_read_slowpath()
1062 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_read_slowpath()
1077 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_read_slowpath()
1080 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_read_slowpath()
1081 /* Ordered by sem->wait_lock against rwsem_mark_wake(). */ in rwsem_down_read_slowpath()
1097 trace_contention_end(sem, -EINTR); in rwsem_down_read_slowpath()
1098 return ERR_PTR(-EINTR); in rwsem_down_read_slowpath()
1125 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_write_slowpath()
1130 rwsem_cond_wake_waiter(sem, atomic_long_read(&sem->count), in rwsem_down_write_slowpath()
1137 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_write_slowpath()
1139 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_write_slowpath()
1142 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count); in rwsem_down_write_slowpath()
1155 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_write_slowpath()
1163 * transfer. If the previous owner is an on-cpu writer and it in rwsem_down_write_slowpath()
1180 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_write_slowpath()
1183 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_write_slowpath()
1190 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_write_slowpath()
1193 trace_contention_end(sem, -EINTR); in rwsem_down_write_slowpath()
1194 return ERR_PTR(-EINTR); in rwsem_down_write_slowpath()
1199 * - up_read/up_write has decremented the active part of count if we come here
1206 raw_spin_lock_irqsave(&sem->wait_lock, flags); in rwsem_wake()
1208 if (!list_empty(&sem->wait_list)) in rwsem_wake()
1211 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); in rwsem_wake()
1219 * - caller incremented waiting part of count and discovered it still negative
1220 * - just wake up any readers at the front of the queue
1227 raw_spin_lock_irqsave(&sem->wait_lock, flags); in rwsem_downgrade_wake()
1229 if (!list_empty(&sem->wait_list)) in rwsem_downgrade_wake()
1232 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); in rwsem_downgrade_wake()
1249 ret = -EINTR; in __down_read_common()
1279 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem); in __down_read_trylock()
1282 tmp = atomic_long_read(&sem->count); in __down_read_trylock()
1284 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, in __down_read_trylock()
1305 ret = -EINTR; in __down_write_common()
1326 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem); in __down_write_trylock()
1340 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem); in __up_read()
1345 tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count); in __up_read()
1362 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem); in __up_write()
1364 * sem->owner may differ from current if the ownership is transferred in __up_write()
1372 tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count); in __up_write()
1387 * anything inside the write-locked region cannot leak in __downgrade_write()
1389 * read-locked region is ok to be re-ordered into the in __downgrade_write()
1395 -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count); in __downgrade_write()
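The single atomic in __downgrade_write() above swaps the writer's hold for a reader's hold: adding (-RWSEM_WRITER_LOCKED + RWSEM_READER_BIAS) clears the writer bit and adds one reader bias in the same indivisible step, so there is never a window in which the lock looks free. A quick user-space check of that arithmetic, with constants assumed from the layout documented earlier:

#include <stdio.h>

/* Assumed constants, consistent with the bit layout documented above. */
#define RWSEM_WRITER_LOCKED	(1L << 0)
#define RWSEM_FLAG_WAITERS	(1L << 1)
#define RWSEM_READER_BIAS	(1L << 8)

int main(void)
{
	/* Writer holds the lock and another task is queued. */
	long count = RWSEM_WRITER_LOCKED | RWSEM_FLAG_WAITERS;	/* 0x003 */

	/* The downgrade adds -1 + 256 = 255 in one atomic step. */
	count += -RWSEM_WRITER_LOCKED + RWSEM_READER_BIAS;

	printf("count = 0x%03lx (readers=%ld, waiters=%ld, writer=%ld)\n",
	       count, count >> 8, (count >> 1) & 1, count & 1);
	return 0;
}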
1413 #define rwbase_rtmutex_lock_state(rtm, state) \
1414 __rt_mutex_lock(rtm, state)
1416 #define rwbase_rtmutex_slowlock_locked(rtm, state) \
1417 __rt_mutex_slowlock_locked(rtm, NULL, state)
1419 #define rwbase_rtmutex_unlock(rtm) \
1420 __rt_mutex_unlock(rtm)
1422 #define rwbase_rtmutex_trylock(rtm) \
1423 __rt_mutex_trylock(rtm)
1442 init_rwbase_rt(&(sem)->rwbase); in __init_rwsem()
1446 lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP); in __init_rwsem()
1453 rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE); in __down_read()
1458 return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE); in __down_read_interruptible()
1463 return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE); in __down_read_killable()
1468 return rwbase_read_trylock(&sem->rwbase); in __down_read_trylock()
1473 rwbase_read_unlock(&sem->rwbase, TASK_NORMAL); in __up_read()
1478 rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE); in __down_write()
1483 return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE); in __down_write_killable()
1488 return rwbase_write_trylock(&sem->rwbase); in __down_write_trylock()
1493 rwbase_write_unlock(&sem->rwbase); in __up_write()
1498 rwbase_write_downgrade(&sem->rwbase); in __downgrade_write()
1511 int count = atomic_read(&sem->rwbase.readers); in is_rwsem_reader_owned()
1524 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); in down_read()
1533 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); in down_read_interruptible()
1536 rwsem_release(&sem->dep_map, _RET_IP_); in down_read_interruptible()
1537 return -EINTR; in down_read_interruptible()
1547 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); in down_read_killable()
1550 rwsem_release(&sem->dep_map, _RET_IP_); in down_read_killable()
1551 return -EINTR; in down_read_killable()
1559 * trylock for reading -- returns 1 if successful, 0 if contention
1566 rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_); in down_read_trylock()
1577 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); in down_write()
1588 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); in down_write_killable()
1592 rwsem_release(&sem->dep_map, _RET_IP_); in down_write_killable()
1593 return -EINTR; in down_write_killable()
1601 * trylock for writing -- returns 1 if successful, 0 if contention
1608 rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_); in down_write_trylock()
1619 rwsem_release(&sem->dep_map, _RET_IP_); in up_read()
1629 rwsem_release(&sem->dep_map, _RET_IP_); in up_write()
1639 lock_downgrade(&sem->dep_map, _RET_IP_); in downgrade_write()
1649 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_); in down_read_nested()
1657 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_); in down_read_killable_nested()
1660 rwsem_release(&sem->dep_map, _RET_IP_); in down_read_killable_nested()
1661 return -EINTR; in down_read_killable_nested()
1671 rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_); in _down_write_nest_lock()
1681 * The owner value for a reader-owned lock is mostly for debugging in down_read_non_owner()
1683 * rwsem. So it is perfectly fine to set it in a preempt-enabled in down_read_non_owner()
1693 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_); in down_write_nested()
1701 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_); in down_write_killable_nested()
1705 rwsem_release(&sem->dep_map, _RET_IP_); in down_write_killable_nested()
1706 return -EINTR; in down_write_killable_nested()
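The _nested, _killable and trylock wrappers listed above differ only in their lockdep annotations and in whether a fatal signal can abort the wait; the underlying lock behaves identically. An illustrative use of a few of them -- struct example_node and both functions are made up, while SINGLE_DEPTH_NESTING is the stock lockdep subclass for one level of same-class nesting:

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/rwsem.h>

struct example_node {
	struct rw_semaphore rwsem;
	struct example_node *parent;
	int value;
};

/* Parent and child rwsems share a lockdep class (same init_rwsem() site),
 * so the inner lock is taken with a subclass to tell lockdep the nesting
 * is intentional. */
static int example_read_pair(struct example_node *child)
{
	int sum;

	down_read(&child->parent->rwsem);
	down_read_nested(&child->rwsem, SINGLE_DEPTH_NESTING);
	sum = child->value + child->parent->value;
	up_read(&child->rwsem);
	up_read(&child->parent->rwsem);
	return sum;
}

/* A write path that stays killable, then downgrades so concurrent readers
 * can proceed while this task keeps the structure stable. */
static int example_update(struct example_node *node, int value)
{
	if (down_write_killable(&node->rwsem))
		return -EINTR;
	node->value = value;
	downgrade_write(&node->rwsem);
	/* ... read-only follow-up work ... */
	up_read(&node->rwsem);
	return 0;
}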