Lines Matching +full:use +full:- +full:rtm
1 // SPDX-License-Identifier: GPL-2.0-only
4 * RT-specific reader/writer semaphores and reader/writer locks
14 * 2) Set the reader BIAS, so readers can use the fast path again
36 * for one reader after the other. We can't use multi-reader inheritance
41 * The risk of writer starvation is there, but the pathological use cases
44 * Fast-path orderings:
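
The matches above come from the header comment of the RT reader/writer base code (kernel/locking/rwbase_rt.c), which is shared between rwsem and rwlock on PREEMPT_RT. The whole reader/writer state lives in a single atomic counter that carries READER_BIAS while no writer owns the lock. Below is a minimal userspace sketch of that encoding; the two constants mirror include/linux/rwbase_rt.h, while the variable names and the step-by-step scenario are illustrative only, not kernel code.

#include <assert.h>
#include <stdatomic.h>

#define READER_BIAS	(1U << 31)	/* present while no writer owns the lock */
#define WRITER_BIAS	(1U << 30)	/* the lock is held for write            */

int main(void)
{
	/* Unlocked: counter == READER_BIAS, i.e. negative as a signed int. */
	atomic_int readers = (int)READER_BIAS;
	assert(atomic_load(&readers) < 0);

	/* Two readers take the fast path: the counter stays negative. */
	atomic_fetch_add(&readers, 2);
	assert(atomic_load(&readers) < 0);

	/*
	 * A writer removes the bias: what is left is the number of readers
	 * still inside the critical section, and new readers now see a
	 * non-negative value, which pushes them into the slow path.
	 */
	atomic_fetch_sub(&readers, (int)READER_BIAS);
	assert(atomic_load(&readers) == 2);

	/* Once both readers have left, the writer can mark it write locked. */
	atomic_fetch_sub(&readers, 2);
	atomic_store(&readers, (int)WRITER_BIAS);
	assert(atomic_load(&readers) == (int)WRITER_BIAS);
	return 0;
}
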
58 * Increment reader count, if sem->readers < 0, i.e. READER_BIAS is in rwbase_read_trylock()
61 for (r = atomic_read(&rwb->readers); r < 0;) { in rwbase_read_trylock()
62 if (likely(atomic_try_cmpxchg_acquire(&rwb->readers, &r, r + 1))) in rwbase_read_trylock()
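
The two lines above are the reader fast path: keep retrying an acquire-ordered cmpxchg as long as the counter is negative (READER_BIAS still present). A standalone C11 equivalent of that loop, with an illustrative function name and without the kernel's likely() annotation, might look as follows.

#include <stdatomic.h>
#include <stdbool.h>

static bool read_trylock_model(atomic_int *readers)
{
	int r = atomic_load_explicit(readers, memory_order_relaxed);

	while (r < 0) {
		/*
		 * Success stores r + 1 with acquire ordering; failure
		 * reloads r so the sign is re-checked before retrying.
		 */
		if (atomic_compare_exchange_weak_explicit(readers, &r, r + 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return true;
	}
	/* Non-negative: a writer removed the bias, take the slow path. */
	return false;
}
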
71 struct rt_mutex_base *rtm = &rwb->rtmutex; in __rwbase_read_lock() local
75 raw_spin_lock_irq(&rtm->wait_lock); in __rwbase_read_lock()
78 * Call into the slow lock path with the rtmutex->wait_lock in __rwbase_read_lock()
87 * unlock(m->wait_lock) in __rwbase_read_lock()
90 * lock(m->wait_lock) in __rwbase_read_lock()
91 * sem->writelocked=true in __rwbase_read_lock()
92 * unlock(m->wait_lock) in __rwbase_read_lock()
95 * sem->writelocked=false in __rwbase_read_lock()
113 ret = rwbase_rtmutex_slowlock_locked(rtm, state); in __rwbase_read_lock()
120 * rtmutex->wait_lock has to be unlocked in any case of course. in __rwbase_read_lock()
123 atomic_inc(&rwb->readers); in __rwbase_read_lock()
124 raw_spin_unlock_irq(&rtm->wait_lock); in __rwbase_read_lock()
126 rwbase_rtmutex_unlock(rtm); in __rwbase_read_lock()
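
The slow path above is entered with rtmutex::wait_lock already held, which is what the quoted comment diagram is about: calling into the rtmutex slow path under wait_lock keeps a reader from racing with the writer flipping the write-locked state and missing its wakeup. The sketch below keeps only the ordering that matters for a reader: sleep until the writer is gone, account the reader, then drop the writer's lock again so readers do not serialize on it. It is a userspace model with a pthread mutex standing in for the rtmutex; the wait_lock, interrupt disabling, priority inheritance and the -EINTR handling of the real code are deliberately not modelled.

#include <pthread.h>
#include <stdatomic.h>

struct rwbase_model {			/* illustrative stand-in for rwbase_rt */
	atomic_int	readers;
	pthread_mutex_t	rtmutex;
};

static void read_lock_slowpath_model(struct rwbase_model *rwb)
{
	/* Sleep until the current writer releases its lock. */
	pthread_mutex_lock(&rwb->rtmutex);

	/*
	 * Holding the writer's lock means no writer owns the semaphore
	 * right now, so this reader can account itself.
	 */
	atomic_fetch_add_explicit(&rwb->readers, 1, memory_order_relaxed);

	/*
	 * Drop the writer's lock immediately: readers only needed it to
	 * wait out the writer, they do not hold it across the read side.
	 */
	pthread_mutex_unlock(&rwb->rtmutex);
}
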
136 lockdep_assert(!current->pi_blocked_on); in rwbase_read_lock()
147 struct rt_mutex_base *rtm = &rwb->rtmutex; in __rwbase_read_unlock() local
151 raw_spin_lock_irq(&rtm->wait_lock); in __rwbase_read_unlock()
155 * clean up rwb->readers it needs to acquire rtm->wait_lock. The in __rwbase_read_unlock()
158 owner = rt_mutex_owner(rtm); in __rwbase_read_unlock()
164 raw_spin_unlock_irq(&rtm->wait_lock); in __rwbase_read_unlock()
172 * rwb->readers can only hit 0 when a writer is waiting for the in rwbase_read_unlock()
177 if (unlikely(atomic_dec_and_test(&rwb->readers))) in rwbase_read_unlock()
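
Read unlock is a single decrement: while READER_BIAS is in place the counter can never reach zero, so hitting zero means a writer has already removed the bias and is waiting for the last reader, and only that case takes the slow path and wakes the rtmutex owner. Below is a compact model of the fast path with a stub in place of the wakeup; the kernel's atomic_dec_and_test() is a fully ordered RMW, and the release ordering used here is the part this sketch relies on to pair with the writer's acquire.

#include <stdatomic.h>

static void wake_waiting_writer_stub(void)
{
	/* placeholder for the __rwbase_read_unlock() slow path */
}

static void read_unlock_model(atomic_int *readers)
{
	/* fetch_sub returns the old value; old == 1 means we were last. */
	if (atomic_fetch_sub_explicit(readers, 1, memory_order_release) == 1)
		wake_waiting_writer_stub();
}
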
184 struct rt_mutex_base *rtm = &rwb->rtmutex; in __rwbase_write_unlock() local
190 (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers); in __rwbase_write_unlock()
191 raw_spin_unlock_irqrestore(&rtm->wait_lock, flags); in __rwbase_write_unlock()
192 rwbase_rtmutex_unlock(rtm); in __rwbase_write_unlock()
197 struct rt_mutex_base *rtm = &rwb->rtmutex; in rwbase_write_unlock() local
200 raw_spin_lock_irqsave(&rtm->wait_lock, flags); in rwbase_write_unlock()
206 struct rt_mutex_base *rtm = &rwb->rtmutex; in rwbase_write_downgrade() local
209 raw_spin_lock_irqsave(&rtm->wait_lock, flags); in rwbase_write_downgrade()
211 __rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags); in rwbase_write_downgrade()
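
Write unlock and downgrade funnel into the same helper, and the only difference is the bias argument: atomic_add_return_release(READER_BIAS - bias) turns the WRITER_BIAS value back into READER_BIAS for a full unlock, or into READER_BIAS + 1 for a downgrade, i.e. "open for readers, with one reader (the downgrading writer) already counted". The tiny standalone program below just checks that arithmetic; the constants mirror include/linux/rwbase_rt.h and the locking around the update is omitted.

#include <assert.h>
#include <stdint.h>

#define READER_BIAS	(1U << 31)
#define WRITER_BIAS	(1U << 30)

int main(void)
{
	uint32_t readers = WRITER_BIAS;			/* write locked */

	/* Full unlock: bias == WRITER_BIAS, counter returns to READER_BIAS. */
	assert(readers + (READER_BIAS - WRITER_BIAS) == READER_BIAS);

	/* Downgrade: bias == WRITER_BIAS - 1, result is READER_BIAS + 1. */
	assert(readers + (READER_BIAS - (WRITER_BIAS - 1)) == READER_BIAS + 1);

	return 0;
}
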
217 lockdep_assert_held(&rwb->rtmutex.wait_lock); in __rwbase_write_trylock()
223 if (!atomic_read_acquire(&rwb->readers)) { in __rwbase_write_trylock()
224 atomic_set(&rwb->readers, WRITER_BIAS); in __rwbase_write_trylock()
234 struct rt_mutex_base *rtm = &rwb->rtmutex; in rwbase_write_lock() local
238 if (rwbase_rtmutex_lock_state(rtm, state)) in rwbase_write_lock()
239 return -EINTR; in rwbase_write_lock()
242 atomic_sub(READER_BIAS, &rwb->readers); in rwbase_write_lock()
246 raw_spin_lock_irqsave(&rtm->wait_lock, flags); in rwbase_write_lock()
258 trace_contention_end(rwb, -EINTR); in rwbase_write_lock()
259 return -EINTR; in rwbase_write_lock()
265 raw_spin_unlock_irqrestore(&rtm->wait_lock, flags); in rwbase_write_lock()
267 raw_spin_lock_irqsave(&rtm->wait_lock, flags); in rwbase_write_lock()
275 raw_spin_unlock_irqrestore(&rtm->wait_lock, flags); in rwbase_write_lock()
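
Put together, the writer path above is: take the rtmutex first (so writers and slow-path readers queue with priority inheritance), subtract READER_BIAS to push new readers off the fast path, then loop until the trylock helper observes a reader count of zero and can set WRITER_BIAS. A condensed userspace model of that sequence follows; a pthread mutex stands in for the rtmutex, a yield loop stands in for rwbase_schedule(), and the wait_lock, signal handling (-EINTR) and tracing are left out.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

#define READER_BIAS	(1U << 31)
#define WRITER_BIAS	(1U << 30)

struct rwbase_model {			/* illustrative stand-in for rwbase_rt */
	atomic_int	readers;
	pthread_mutex_t	rtmutex;
};

static void write_lock_model(struct rwbase_model *rwb)
{
	/* Serialize against other writers and blocked readers. */
	pthread_mutex_lock(&rwb->rtmutex);

	/* Remove the bias: new readers now fail the fast-path sign check. */
	atomic_fetch_sub_explicit(&rwb->readers, (int)READER_BIAS,
				  memory_order_relaxed);

	/* Wait for the readers that got in beforehand to drain. */
	while (atomic_load_explicit(&rwb->readers, memory_order_acquire) != 0)
		sched_yield();

	/* Serialized by the mutex, so a plain store is good enough here. */
	atomic_store_explicit(&rwb->readers, (int)WRITER_BIAS,
			      memory_order_relaxed);
}
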
282 struct rt_mutex_base *rtm = &rwb->rtmutex; in rwbase_write_trylock() local
285 if (!rwbase_rtmutex_trylock(rtm)) in rwbase_write_trylock()
288 atomic_sub(READER_BIAS, &rwb->readers); in rwbase_write_trylock()
290 raw_spin_lock_irqsave(&rtm->wait_lock, flags); in rwbase_write_trylock()
292 raw_spin_unlock_irqrestore(&rtm->wait_lock, flags); in rwbase_write_trylock()
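
The trylock variant follows the same pattern without ever waiting: if the rtmutex itself cannot be trylocked the attempt fails immediately, otherwise the bias is removed and the counter checked once; when readers are still inside, the kernel restores the bias and drops the rtmutex instead of waiting (that back-out falls outside the matched lines above). A userspace sketch of that shape, again with a pthread mutex as a stand-in and illustrative names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define READER_BIAS	(1U << 31)
#define WRITER_BIAS	(1U << 30)

struct rwbase_model {			/* illustrative stand-in for rwbase_rt */
	atomic_int	readers;
	pthread_mutex_t	rtmutex;
};

static bool write_trylock_model(struct rwbase_model *rwb)
{
	if (pthread_mutex_trylock(&rwb->rtmutex))
		return false;			/* writer already active */

	atomic_fetch_sub_explicit(&rwb->readers, (int)READER_BIAS,
				  memory_order_relaxed);

	if (atomic_load_explicit(&rwb->readers, memory_order_acquire) == 0) {
		atomic_store_explicit(&rwb->readers, (int)WRITER_BIAS,
				      memory_order_relaxed);
		return true;			/* write locked */
	}

	/* Readers still inside: undo the bias removal instead of waiting. */
	atomic_fetch_add_explicit(&rwb->readers, (int)READER_BIAS,
				  memory_order_relaxed);
	pthread_mutex_unlock(&rwb->rtmutex);
	return false;
}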