Lines matching references to "lock":

25 #include <trace/events/lock.h>
36 * MCS lock. A copy of the original MCS lock paper ("Algorithms for Scalable
42 * This queued spinlock implementation is based on the MCS lock, however to
46 * In particular; where the traditional MCS lock consists of a tail pointer
55 * number. With one byte for the lock value and 3 bytes for the tail, only a
56 * 32-bit word is now needed. Even though we only need 1 bit for the lock,
60 * We also change the first spinner to spin on the lock bit instead of its
61 * node; thereby avoiding the need to carry a node from lock to unlock, and
62 * preserving existing lock API. This also makes the unlock code simpler and
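
The compression described in this comment block can be sketched concretely. The sketch below is an illustration only, assuming the common configuration where NR_CPUS fits in 14 bits so the pending bit gets a whole byte; the authoritative masks and offsets (the _Q_* macros) live in include/asm-generic/qspinlock_types.h, and the SKETCH_/sketch_ names are invented for the example.

/*
 * Illustrative carving of the 32-bit lock word:
 *
 *   bits  0- 7: locked byte (only 1 bit is strictly needed)
 *   bits  8-15: pending
 *   bits 16-17: tail index, i.e. which of the 4 per-CPU MCS nodes is
 *               in use (task, softirq, hardirq, nmi nesting level)
 *   bits 18-31: tail CPU number + 1 (0 means "no queue tail")
 */
#define SKETCH_LOCKED_MASK	0x000000ffU
#define SKETCH_PENDING_MASK	0x0000ff00U
#define SKETCH_TAIL_IDX_MASK	0x00030000U
#define SKETCH_TAIL_CPU_MASK	0xfffc0000U

/* Pack (cpu, nesting idx) into the tail field, in the spirit of encode_tail(). */
static inline unsigned int sketch_encode_tail(int cpu, int idx)
{
	return ((unsigned int)(cpu + 1) << 18) | ((unsigned int)idx << 16);
}
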
90 static __always_inline void __pv_kick_node(struct qspinlock *lock,
92 static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
111 * @lock: Pointer to queued spinlock structure
114 * (queue tail, pending bit, lock value)
130 void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
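
For context on how this slowpath is entered: the inline fast path attempts a single acquire cmpxchg from 0 to _Q_LOCKED_VAL and only drops into the slowpath, passing along the value it observed, when that fails. A minimal sketch, roughly following the generic fast path in include/asm-generic/qspinlock.h:

static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	int val = 0;

	/* Uncontended: 0,0,0 -> 0,0,1 with acquire ordering. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	/* Contended: hand the observed value to the slowpath for triage. */
	queued_spin_lock_slowpath(lock, val);
}
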
141 if (virt_spin_lock(lock))
152 val = atomic_cond_read_relaxed(&lock->val,
167 val = queued_fetch_set_pending_acquire(lock);
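
When an architecture does not provide its own queued_fetch_set_pending_acquire(), the helper used here reduces to an atomic fetch-or of the pending bit with acquire ordering; a sketch of that generic form:

static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	/* Set the pending bit and return the previous lock word. */
	return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
}
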
180 clear_pending(lock);
191 * store-release that clears the locked bit and create lock
197 smp_cond_load_acquire(&lock->locked, !VAL);
204 clear_pending_set_locked(lock);
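
Both pending-bit manipulations seen here (clear_pending() when we give up and queue, clear_pending_set_locked() when the pending waiter takes the lock) exploit the byte layout: assuming the common configuration where pending occupies a full byte, they can be plain byte/halfword stores rather than atomic read-modify-write operations. Roughly:

/* Undo the pending bit when we decide to queue instead. */
static __always_inline void clear_pending(struct qspinlock *lock)
{
	WRITE_ONCE(lock->pending, 0);
}

/* 0,1,0 -> 0,0,1 : take the lock and drop pending in one halfword store. */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
}
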
219 trace_contention_begin(lock, LCB_F_SPIN);
226 * we fall back to spinning on the lock directly without using
232 while (!queued_spin_trylock(lock))
260 if (queued_spin_trylock(lock))
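
Both fallback paths above lean on queued_spin_trylock(), which only attempts the acquire when the whole lock word is zero (no owner, no pending bit, no tail). A sketch in the spirit of the generic helper in include/asm-generic/qspinlock.h:

static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	int val = atomic_read(&lock->val);

	/* Any set bit (locked, pending or tail) means we cannot take it now. */
	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}
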
277 old = xchg_tail(lock, tail);
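
Publishing our MCS node is a single exchange of the tail field, returning the previous tail so we can link in behind it. Relaxed ordering suffices because the node was fully initialised before this point. A sketch of the variant used when the pending bit occupies a full byte (so the tail is an addressable 16-bit halfword):

static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	/* Swap in our tail encoding; the old value tells us who to queue behind. */
	return (u32)xchg_relaxed(&lock->tail,
				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}
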
294 * While waiting for the MCS lock, the next pointer may have
295 * been set by another lock waiter. We optimistically load
311 * store-release that clears the locked bit and create lock
316 * the lock and return a non-zero value. So we have to skip the
325 if ((val = pv_wait_head_or_lock(lock, node)))
328 val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));
332 * claim the lock:
334 * n,0,0 -> 0,0,1 : lock, uncontended
335 * *,*,0 -> *,*,1 : lock, contended
337 * If the queue head is the only one in the queue (lock value == tail)
338 * and nobody is pending, clear the tail code and grab the lock.
339 * Otherwise, we only need to grab the lock.
344 * of lock stealing; therefore we must also allow:
353 if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
362 set_locked(lock);
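
The contended grab (*,*,0 -> *,*,1) only needs to set the locked byte while leaving the tail intact; since we are the queue head, a plain byte store is enough. Roughly:

static __always_inline void set_locked(struct qspinlock *lock)
{
	/* *,*,0 -> *,*,1 : grab the lock, keep the tail as-is. */
	WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
}
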
371 pv_kick_node(lock, next);
374 trace_contention_end(lock, 0);
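
The store-release that the waiters above pair their load-acquires with is the unlock path, which simply clears the locked byte. A sketch of the generic queued_spin_unlock() from include/asm-generic/qspinlock.h:

static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/* Release-store of 0 to the locked byte; waiters observe this. */
	smp_store_release(&lock->locked, 0);
}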