Lines Matching +full:line +full:- +full:orders

1 // SPDX-License-Identifier: GPL-2.0-or-later
67 u64 delta = sched_clock() - seen; in recently_sleepy()
108 return (val >> _Q_TAIL_CPU_OFFSET) - 1; in decode_tail_cpu()
142 " bne- 1b \n" in trylock_clean_tail()
146 : "r" (&lock->val), "r"(tail), "r" (newval), in trylock_clean_tail()
174 " bne- 1b \n" in publish_tail_cpu()
176 : "r" (&lock->val), "r" (tail), "r"(_Q_TAIL_CPU_MASK) in publish_tail_cpu()
190 " bne- 1b \n" in set_mustq()
192 : "r" (&lock->val), "r" (_Q_MUST_Q_VAL) in set_mustq()
206 " bne- 1b \n" in clear_mustq()
208 : "r" (&lock->val), "r" (_Q_MUST_Q_VAL) in clear_mustq()
225 " bne- 2f \n" in try_set_sleepy()
227 " bne- 1b \n" in try_set_sleepy()
230 : "r" (&lock->val), "r"(old), "r" (new) in try_set_sleepy()
269 * orders the release barrier in publish_tail_cpu performed by the in get_tail_qnode()
276 struct qnode *qnode = &qnodesp->nodes[idx]; in get_tail_qnode()
277 if (qnode->lock == lock) in get_tail_qnode()
319 if (READ_ONCE(lock->val) == val) { in __yield_to_locked_owner()
364 next = READ_ONCE(node->next); in propagate_sleepy()
368 if (next->sleepy) in propagate_sleepy()
373 next->sleepy = 1; in propagate_sleepy()
392 if (node->sleepy || vcpu_is_preempted(prev_cpu)) { in yield_to_prev()
393 u32 val = READ_ONCE(lock->val); in yield_to_prev()
396 if (node->next && !node->next->sleepy) { in yield_to_prev()
400 * to become "non-sleepy" if vCPU preemption in yield_to_prev()
405 node->next->sleepy = 1; in yield_to_prev()
412 node->sleepy = false; in yield_to_prev()
430 if (!READ_ONCE(node->locked)) { in yield_to_prev()
474 val = READ_ONCE(lock->val); in try_to_steal_lock()
511 * while the owner is preempted -- we won't interfere in try_to_steal_lock()
541 if (unlikely(qnodesp->count >= MAX_NODES)) { in queued_spin_lock_mcs_queue()
548 idx = qnodesp->count++; in queued_spin_lock_mcs_queue()
550 * Ensure that we increment the head node->count before initialising in queued_spin_lock_mcs_queue()
555 node = &qnodesp->nodes[idx]; in queued_spin_lock_mcs_queue()
556 node->next = NULL; in queued_spin_lock_mcs_queue()
557 node->lock = lock; in queued_spin_lock_mcs_queue()
558 node->cpu = smp_processor_id(); in queued_spin_lock_mcs_queue()
559 node->sleepy = 0; in queued_spin_lock_mcs_queue()
560 node->locked = 0; in queued_spin_lock_mcs_queue()
562 tail = encode_tail_cpu(node->cpu); in queued_spin_lock_mcs_queue()
580 WRITE_ONCE(prev->next, node); in queued_spin_lock_mcs_queue()
584 while (!READ_ONCE(node->locked)) { in queued_spin_lock_mcs_queue()
597 * like it could cause additional line transitions because in queued_spin_lock_mcs_queue()
601 next = READ_ONCE(node->next); in queued_spin_lock_mcs_queue()
613 val = READ_ONCE(lock->val); in queued_spin_lock_mcs_queue()
670 /* There is a next, must wait for node->next != NULL (MCS protocol) */ in queued_spin_lock_mcs_queue()
671 next = READ_ONCE(node->next); in queued_spin_lock_mcs_queue()
674 while (!(next = READ_ONCE(node->next))) in queued_spin_lock_mcs_queue()
683 * the acquire barrier we took the lock with orders that update vs in queued_spin_lock_mcs_queue()
688 int next_cpu = next->cpu; in queued_spin_lock_mcs_queue()
689 WRITE_ONCE(next->locked, 1); in queued_spin_lock_mcs_queue()
695 WRITE_ONCE(next->locked, 1); in queued_spin_lock_mcs_queue()
703 * values if an interrupt occurs after we increment qnodesp->count in queued_spin_lock_mcs_queue()
704 * but before node->lock is initialized. The barrier ensures that in queued_spin_lock_mcs_queue()
707 node->lock = NULL; in queued_spin_lock_mcs_queue()
709 qnodesp->count--; in queued_spin_lock_mcs_queue()
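
These hits appear to come from the PowerPC queued-spinlock implementation (arch/powerpc/lib/qspinlock.c). The match at file line 108 shows how the tail field of lock->val maps back to a CPU number: the CPU is stored biased by one so that a tail of zero means "no queued waiters". Below is a minimal standalone sketch of that round trip, not the kernel code itself; the _Q_TAIL_CPU_OFFSET value and the demo main() are illustrative assumptions.

/*
 * Sketch of the tail-CPU encoding suggested by the decode_tail_cpu()
 * match above.  Assumes an illustrative field offset; the real layout
 * is defined by the kernel's qspinlock_types.h.
 */
#include <stdio.h>
#include <stdint.h>

#define _Q_TAIL_CPU_OFFSET	16	/* illustrative placement of the tail field */

/* CPU numbers are stored biased by one so that tail == 0 means "no queue". */
static inline uint32_t encode_tail_cpu(int cpu)
{
	return (uint32_t)(cpu + 1) << _Q_TAIL_CPU_OFFSET;
}

static inline int decode_tail_cpu(uint32_t val)
{
	return (int)(val >> _Q_TAIL_CPU_OFFSET) - 1;
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++) {
		uint32_t tail = encode_tail_cpu(cpu);
		printf("cpu %d -> tail 0x%08x -> cpu %d\n",
		       cpu, tail, decode_tail_cpu(tail));
	}
	return 0;
}

This biased encoding is what lets publish_tail_cpu() and trylock_clean_tail() (also matched above) treat a zero tail field as "queue empty" without a separate flag bit.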