Lines Matching +full:wait +full:- +full:queue
1 /* SPDX-License-Identifier: GPL-2.0 */
16 * pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
17 * pv_kick(cpu) -- wakes a suspended vcpu
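
These two hypercall-style primitives are the entire interface the paravirt slowpath needs from the hypervisor layer; everything else in this file is built on top of them. As a rough illustration of that contract only (not the in-tree implementation, which each architecture/hypervisor supplies, e.g. via KVM's wait/kick callbacks on x86), a stub backend could look like the sketch below; stub_pv_wait()/stub_pv_kick() are made-up names, and READ_ONCE()/cpu_relax() are the usual kernel helpers.

/*
 * Illustrative stub with the same contract as pv_wait()/pv_kick():
 *  - pv_wait(ptr, val) may suspend the calling vCPU only while *ptr == val
 *    and may return spuriously, so callers must re-check their condition;
 *  - pv_kick(cpu) makes a vCPU suspended in pv_wait() runnable again.
 */
static void stub_pv_wait(u8 *ptr, u8 val)
{
        if (READ_ONCE(*ptr) != val)
                return;         /* condition already changed, don't suspend */

        /* A real backend would hypercall here and deschedule this vCPU. */
        cpu_relax();
}

static void stub_pv_kick(int cpu)
{
        /* A real backend would hypercall here to wake the target vCPU. */
        (void)cpu;
}
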
27 * Queue Node Adaptive Spinning
29 * A queue node vCPU will stop spinning if the vCPU in the previous node is
30 * not running. The one lock stealing attempt allowed at slowpath entry
31 * mitigates the slight slowdown for non-overcommitted guest with this
32 * aggressive wait-early mechanism.
41 * Queue node uses: VCPU_RUNNING & VCPU_HALTED.
42 * Queue head uses: VCPU_RUNNING & VCPU_HASHED.
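
The state values named above drive both the adaptive spinning of queue nodes and the hashing handshake of the queue head. A minimal sketch of the per-vCPU bookkeeping implied by these comments is shown below; the names follow this listing, and the exact in-tree definitions (field order, spelling of the enum) may differ by kernel version.

/*
 * Per-vCPU queue node state as used by the fragments above:
 *  - a queue node toggles between VCPU_RUNNING and VCPU_HALTED,
 *  - the queue head toggles between VCPU_RUNNING and VCPU_HASHED.
 */
enum vcpu_state {
        VCPU_RUNNING = 0,
        VCPU_HALTED,            /* queue node suspended in pv_wait() */
        VCPU_HASHED,            /* lock hashed, waiting for pv_kick() */
};

struct pv_node {
        struct mcs_spinlock     mcs;    /* the regular MCS queue node */
        int                     cpu;    /* target CPU for pv_kick() */
        u8                      state;  /* one of the vcpu_state values */
};
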
63 * The pending bit is set by the queue head vCPU of the MCS wait queue in
67 * enter the MCS wait queue. So lock starvation shouldn't happen as long
72 * mode spinning on the lock unless the MCS wait queue is empty. In this
74 * become the queue head and set the pending bit.
85 * present in the MCS wait queue but the pending bit isn't set. in pv_hybrid_queued_unfair_trylock()
88 int val = atomic_read(&lock->val); in pv_hybrid_queued_unfair_trylock()
92 try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL)) { in pv_hybrid_queued_unfair_trylock()
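
Put together, the matched fragments describe the hybrid trylock: stay in unfair (lock-stealing) mode while there are queued waiters but the pending bit is clear, and give up immediately once the queue head has set the pending bit, so that stealing cannot starve the queue. The reconstruction below follows the lines shown here; lock-event accounting is elided and exact helper choices (try_cmpxchg_acquire() vs. older cmpxchg_acquire()) vary between kernel versions.

static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
        /*
         * Stay in unfair lock mode as long as queued mode waiters are
         * present in the MCS wait queue but the pending bit isn't set.
         */
        for (;;) {
                int val = atomic_read(&lock->val);
                u8 old = 0;

                /* Steal the lock if neither the locked nor pending bit is set. */
                if (!(val & _Q_LOCKED_PENDING_MASK) &&
                    try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL))
                        return true;

                /* Empty queue or pending bit set: fall back to queued mode. */
                if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
                        break;

                cpu_relax();
        }

        return false;
}
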
106 * The pending bit is used by the queue head vCPU to indicate that it
112 WRITE_ONCE(lock->pending, 1); in set_pending()
124 return !READ_ONCE(lock->locked) && in trylock_clear_pending()
125 try_cmpxchg_acquire(&lock->locked_pending, &old, _Q_LOCKED_VAL); in trylock_clear_pending()
130 atomic_or(_Q_PENDING_VAL, &lock->val); in set_pending()
137 old = atomic_read(&lock->val); in trylock_clear_pending()
145 } while (!atomic_try_cmpxchg_acquire(&lock->val, &old, new)); in trylock_clear_pending()
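
The fragments above come from the two build-time variants of the pending-bit helpers: when the pending field occupies a full byte (_Q_PENDING_BITS == 8) it can be written directly and the lock taken with a 16-bit cmpxchg on the combined locked_pending halfword; otherwise the whole lock word has to be updated atomically. A hedged reconstruction of both variants, with the surrounding comments trimmed:

#if _Q_PENDING_BITS == 8
static __always_inline void set_pending(struct qspinlock *lock)
{
        WRITE_ONCE(lock->pending, 1);
}

static __always_inline bool trylock_clear_pending(struct qspinlock *lock)
{
        u16 old = _Q_PENDING_VAL;

        /* Take the lock and clear pending in one 16-bit acquire cmpxchg. */
        return !READ_ONCE(lock->locked) &&
               try_cmpxchg_acquire(&lock->locked_pending, &old, _Q_LOCKED_VAL);
}
#else /* _Q_PENDING_BITS == 8 */
static __always_inline void set_pending(struct qspinlock *lock)
{
        atomic_or(_Q_PENDING_VAL, &lock->val);
}

static __always_inline bool trylock_clear_pending(struct qspinlock *lock)
{
        int old, new;

        old = atomic_read(&lock->val);
        do {
                if (old & _Q_LOCKED_MASK)
                        return false;
                /* Try to clear the pending bit and set the locked bit. */
                new = (old & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
        } while (!atomic_try_cmpxchg_acquire(&lock->val, &old, new));

        return true;
}
#endif /* _Q_PENDING_BITS == 8 */
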
154 * Hashing is done on a per-cacheline basis to minimize the need to access
160 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
192 * Allocate space from bootmem which should be page-size aligned in __pv_init_lock_hash()
204 for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0; \
205 offset < (1 << pv_lock_hash_bits); \
206 offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
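
The hash table maps a contended lock to the pv_node of its queue head so that the unlocker knows which vCPU to kick. To keep probing cheap, entries are grouped per cacheline and scanned linearly from the home line, which is what the for_each_hash_entry() iterator above expresses. A sketch of the entry layout and sizing, using the names from this listing (PV_HE_MIN is an assumed helper for the minimum table size):

struct pv_hash_entry {
        struct qspinlock *lock;         /* NULL while the slot is free */
        struct pv_node   *node;         /* queue head to kick at unlock */
};

/* Entries per cacheline, and the minimum table size (one page's worth). */
#define PV_HE_PER_LINE  (SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
#define PV_HE_MIN       (PAGE_SIZE / sizeof(struct pv_hash_entry))

static struct pv_hash_entry *pv_lock_hash;
static unsigned int pv_lock_hash_bits __read_mostly;

Only locks whose queue head is (about to be) suspended are present in the table, so the number of live entries is bounded by the number of vCPUs and a simple open-addressing scan suffices.
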
217 if (try_cmpxchg(&he->lock, &old, lock)) { in pv_hash()
218 WRITE_ONCE(he->node, node); in pv_hash()
220 return &he->lock; in pv_hash()
243 if (READ_ONCE(he->lock) == lock) { in pv_unhash()
244 node = READ_ONCE(he->node); in pv_unhash()
245 WRITE_ONCE(he->lock, NULL); in pv_unhash()
253 * having the lock owner do the unhash -- IFF the unlock sees the in pv_unhash()
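
pv_hash() claims the first free slot along the probe sequence with a cmpxchg on the lock pointer and then publishes the node pointer; pv_unhash() walks the same sequence until it finds the lock and frees the slot. A trimmed reconstruction of pv_unhash() matching the fragments above (event accounting dropped):

static struct pv_node *pv_unhash(struct qspinlock *lock)
{
        unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
        struct pv_hash_entry *he;
        struct pv_node *node;

        for_each_hash_entry(he, offset, hash) {
                if (READ_ONCE(he->lock) == lock) {
                        node = READ_ONCE(he->node);
                        WRITE_ONCE(he->lock, NULL);     /* free the slot */
                        return node;
                }
        }

        /*
         * Hard assume we'll find an entry: the unlocker only gets here
         * after observing _Q_SLOW_VAL, which is written after the hash
         * insertion, so the entry must be visible.
         */
        BUG();
}
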
269 return READ_ONCE(prev->state) != VCPU_RUNNING; in pv_wait_early()
281 pn->cpu = smp_processor_id(); in pv_init_node()
282 pn->state = VCPU_RUNNING; in pv_init_node()
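
The wait-early predicate samples the previous node's state only once every few hundred spin iterations to avoid hammering a remote cacheline, and pv_init_node() merely records the CPU number and marks the node runnable. A sketch of both helpers; the 0xff check mask mirrors mainline but should be treated as an assumption here.

#define PV_PREV_CHECK_MASK      0xff    /* sample prev->state every 256 loops */

/*
 * Return true if the queue-node vCPU should stop spinning because the
 * vCPU owning the previous node does not appear to be running.
 */
static inline bool pv_wait_early(struct pv_node *prev, int loop)
{
        if ((loop & PV_PREV_CHECK_MASK) != 0)
                return false;

        return READ_ONCE(prev->state) != VCPU_RUNNING;
}

static void pv_init_node(struct mcs_spinlock *node)
{
        struct pv_node *pn = (struct pv_node *)node;

        pn->cpu = smp_processor_id();
        pn->state = VCPU_RUNNING;
}
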
286 * Wait for node->locked to become true, halt the vcpu after a short spin.
298 for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) { in pv_wait_node()
299 if (READ_ONCE(node->locked)) in pv_wait_node()
309 * Order pn->state vs pn->locked thusly: in pv_wait_node()
311 * [S] pn->state = VCPU_HALTED [S] next->locked = 1 in pv_wait_node()
313 * [L] pn->locked [RmW] pn->state = VCPU_HASHED in pv_wait_node()
317 smp_store_mb(pn->state, VCPU_HALTED); in pv_wait_node()
319 if (!READ_ONCE(node->locked)) { in pv_wait_node()
322 pv_wait(&pn->state, VCPU_HALTED); in pv_wait_node()
330 cmpxchg(&pn->state, VCPU_HALTED, VCPU_RUNNING); in pv_wait_node()
334 * spurious wakeup and the vCPU should wait again. However, in pv_wait_node()
340 !READ_ONCE(node->locked)); in pv_wait_node()
344 * By now our node->locked should be 1 and our caller will not actually in pv_wait_node()
345 * spin-wait for it. We do however rely on our caller to do a in pv_wait_node()
346 * load-acquire for us. in pv_wait_node()
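
Taken together, these fragments are the spin-then-suspend loop of a queue node: spin up to SPIN_THRESHOLD iterations (breaking out early if the previous vCPU looks preempted), publish VCPU_HALTED with a full barrier, re-check node->locked, and only then call pv_wait(). On wakeup the HALTED state is rolled back to RUNNING unless pv_kick_node() has already advanced it to HASHED. A trimmed reconstruction with lock-event accounting and the wait-early bookkeeping flag elided:

static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
{
        struct pv_node *pn = (struct pv_node *)node;
        struct pv_node *pp = (struct pv_node *)prev;
        int loop;

        for (;;) {
                for (loop = SPIN_THRESHOLD; loop; loop--) {
                        if (READ_ONCE(node->locked))
                                return;
                        if (pv_wait_early(pp, loop))
                                break;          /* previous vCPU not running */
                        cpu_relax();
                }

                /*
                 * Publish VCPU_HALTED before re-reading node->locked;
                 * pairs with the cmpxchg() in pv_kick_node().
                 */
                smp_store_mb(pn->state, VCPU_HALTED);

                if (!READ_ONCE(node->locked))
                        pv_wait(&pn->state, VCPU_HALTED);

                /*
                 * If pv_kick_node() advanced us to VCPU_HASHED, keep that
                 * value so pv_wait_head_or_lock() won't hash the lock again.
                 */
                cmpxchg(&pn->state, VCPU_HALTED, VCPU_RUNNING);

                /*
                 * node->locked still clear means a spurious wakeup; loop
                 * around and spin again instead of suspending immediately.
                 */
        }
}
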
351 * Called after setting next->locked = 1 when we're the lock owner.
364 * observe its next->locked value and advance itself. in pv_kick_node()
368 * The write to next->locked in arch_mcs_spin_unlock_contended() in pv_kick_node()
369 * must be ordered before the read of pn->state in the cmpxchg() in pv_kick_node()
373 * dependency will order the reading of pn->state before any in pv_kick_node()
377 if (!try_cmpxchg_relaxed(&pn->state, &old, VCPU_HASHED)) in pv_kick_node()
387 WRITE_ONCE(lock->locked, _Q_SLOW_VAL); in pv_kick_node()
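
pv_kick_node() runs on the lock owner right after it has stored next->locked = 1. If the successor managed to park itself (state == VCPU_HALTED), the owner advances it straight to VCPU_HASHED, hashes the lock on its behalf and tags the lock byte with _Q_SLOW_VAL so that its own eventual unlock takes the kick slowpath. A hedged reconstruction with the detailed ordering commentary abbreviated:

static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
        struct pv_node *pn = (struct pv_node *)node;
        u8 old = VCPU_HALTED;

        /*
         * Only act if the successor went (or is about to go) to sleep;
         * pairs with the smp_store_mb()/cmpxchg() in pv_wait_node().  If
         * the transition fails, the successor is running and will notice
         * next->locked on its own.
         */
        if (!try_cmpxchg_relaxed(&pn->state, &old, VCPU_HASHED))
                return;

        /*
         * Hash the lock for the sleeper and tag the lock byte as slow.
         * The vCPU running here is the lock owner and is also the one
         * that will check _Q_SLOW_VAL and the hash table at unlock time,
         * so no extra ordering is needed between these two stores.
         */
        WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
        (void)pv_hash(lock, pn);
}
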
392 * Wait for l->locked to become clear and acquire the lock;
410 if (READ_ONCE(pn->state) == VCPU_HASHED) in pv_wait_head_or_lock()
420 * Set correct vCPU state to be used by queue node wait-early in pv_wait_head_or_lock()
423 WRITE_ONCE(pn->state, VCPU_RUNNING); in pv_wait_head_or_lock()
430 for (loop = SPIN_THRESHOLD; loop; loop--) { in pv_wait_head_or_lock()
446 * [S] <hash> [Rmw] l->locked == _Q_SLOW_VAL in pv_wait_head_or_lock()
448 * [RmW] l->locked = _Q_SLOW_VAL [L] <unhash> in pv_wait_head_or_lock()
452 if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) { in pv_wait_head_or_lock()
458 WRITE_ONCE(lock->locked, _Q_LOCKED_VAL); in pv_wait_head_or_lock()
463 WRITE_ONCE(pn->state, VCPU_HASHED); in pv_wait_head_or_lock()
466 pv_wait(&lock->locked, _Q_SLOW_VAL); in pv_wait_head_or_lock()
469 * Because of lock stealing, the queue head vCPU may not be in pv_wait_head_or_lock()
470 * able to acquire the lock before it has to wait again. in pv_wait_head_or_lock()
481 return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL); in pv_wait_head_or_lock()
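
These fragments form the queue-head loop: mark the node RUNNING so the next waiter's wait-early check sees an active head, set the pending bit to stop lock stealing, spin up to SPIN_THRESHOLD attempts with trylock_clear_pending(), and if that fails, hash the lock (once), tag it with _Q_SLOW_VAL and suspend in pv_wait() on the lock byte. The xchg() doubles as a final trylock: reading back 0 means the lock was free, so the head takes it, restores _Q_LOCKED_VAL and removes its hash entry again. A trimmed reconstruction with event accounting and some commentary elided:

static u32
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
        struct pv_node *pn = (struct pv_node *)node;
        struct qspinlock **lp = NULL;
        int loop;

        /* If pv_kick_node() already hashed the lock for us, don't redo it. */
        if (READ_ONCE(pn->state) == VCPU_HASHED)
                lp = (struct qspinlock **)1;

        for (;;) {
                /* Let the next node's wait-early check see us running. */
                WRITE_ONCE(pn->state, VCPU_RUNNING);

                /* Disable lock stealing while actively spinning for the lock. */
                set_pending(lock);
                for (loop = SPIN_THRESHOLD; loop; loop--) {
                        if (trylock_clear_pending(lock))
                                goto gotlock;
                        cpu_relax();
                }
                clear_pending(lock);

                if (!lp) { /* ONCE */
                        lp = pv_hash(lock, pn);

                        /*
                         * The hash entry must be visible before _Q_SLOW_VAL;
                         * pairs with the smp_rmb() on the unlock side.
                         */
                        if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
                                /* Lock was free: take it and undo the hashing. */
                                WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
                                WRITE_ONCE(*lp, NULL);
                                goto gotlock;
                        }
                }
                WRITE_ONCE(pn->state, VCPU_HASHED);
                pv_wait(&lock->locked, _Q_SLOW_VAL);

                /*
                 * Lock stealing may have beaten us to the lock; loop
                 * around and wait again.
                 */
        }

gotlock:
        /* The OR of _Q_LOCKED_VAL keeps the caller's lock-value check alive. */
        return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}
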
485 * Include the architecture specific callee-save thunk of the
487 * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
506 (unsigned long)lock, atomic_read(&lock->val)); in __pv_queued_spin_unlock_slowpath()
511 * A failed cmpxchg doesn't provide any memory-ordering guarantees, in __pv_queued_spin_unlock_slowpath()
529 smp_store_release(&lock->locked, 0); in __pv_queued_spin_unlock_slowpath()
539 pv_kick(node->cpu); in __pv_queued_spin_unlock_slowpath()
552 if (try_cmpxchg_release(&lock->locked, &locked, 0)) in __pv_queued_spin_unlock()
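
The unlock side is a single release cmpxchg of the lock byte from _Q_LOCKED_VAL to 0; only when that fails because the byte holds _Q_SLOW_VAL does it drop into the slowpath, which orders itself against the hashing with smp_rmb(), unhashes the node, releases the lock and kicks the suspended queue head. A trimmed reconstruction of the pair of functions shown above (some function attributes and event accounting elided):

__visible void
__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
        struct pv_node *node;

        if (unlikely(locked != _Q_SLOW_VAL)) {
                WARN(!debug_locks_silent,
                     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
                     (unsigned long)lock, atomic_read(&lock->val));
                return;
        }

        /*
         * The failed cmpxchg in __pv_queued_spin_unlock() gives no ordering,
         * so order the pv_unhash() reads after observing _Q_SLOW_VAL here.
         */
        smp_rmb();

        /* Look up and remove the queue head that hashed this lock. */
        node = pv_unhash(lock);

        /* Release the lock; the lock memory may be reused after this store. */
        smp_store_release(&lock->locked, 0);

        /* Kicking an already-running vCPU is harmless, merely a bit slower. */
        pv_kick(node->cpu);
}

__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
{
        u8 locked = _Q_LOCKED_VAL;

        /*
         * Fast path: if the byte still holds _Q_LOCKED_VAL nobody hashed
         * this lock, so a release cmpxchg to 0 is the whole unlock.  We
         * must not store 0 blindly when the byte is _Q_SLOW_VAL, because
         * the lock could then be re-taken and re-hashed before the stale
         * entry is removed, leaving multiple hash entries for one lock.
         */
        if (try_cmpxchg_release(&lock->locked, &locked, 0))
                return;

        __pv_queued_spin_unlock_slowpath(lock, locked);
}
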