Lines Matching refs:val
105 static inline int decode_tail_cpu(u32 val) in decode_tail_cpu() argument
107 return (val >> _Q_TAIL_CPU_OFFSET) - 1; in decode_tail_cpu()
110 static inline int get_owner_cpu(u32 val) in get_owner_cpu() argument
112 return (val & _Q_OWNER_CPU_MASK) >> _Q_OWNER_CPU_OFFSET; in get_owner_cpu()
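
The two helpers above, decode_tail_cpu() and get_owner_cpu(), unpack fields of the 32-bit lock word; the listing comes from the powerpc queued spinlock implementation (arch/powerpc/lib/qspinlock.c). Below is a minimal standalone sketch of the same decoding. The bit offsets and mask are assumptions chosen for illustration, not the kernel's definitions; what matters is that the tail CPU is stored biased by +1 so that zero can mean "no queued waiter".

/* decode_fields.c: illustrative decoding of owner and tail CPU fields. */
#include <stdint.h>
#include <stdio.h>

#define _Q_OWNER_CPU_OFFSET  1          /* assumed layout */
#define _Q_OWNER_CPU_BITS    14
#define _Q_OWNER_CPU_MASK    (((1u << _Q_OWNER_CPU_BITS) - 1) << _Q_OWNER_CPU_OFFSET)
#define _Q_TAIL_CPU_OFFSET   16         /* assumed layout */

/* Tail CPU is encoded +1 so that 0 means "no queued waiters". */
static inline int decode_tail_cpu(uint32_t val)
{
        return (val >> _Q_TAIL_CPU_OFFSET) - 1;
}

static inline int get_owner_cpu(uint32_t val)
{
        return (val & _Q_OWNER_CPU_MASK) >> _Q_OWNER_CPU_OFFSET;
}

int main(void)
{
        /* Lock word with owner CPU 3, tail CPU 5 and the locked bit set. */
        uint32_t val = ((5u + 1) << _Q_TAIL_CPU_OFFSET) |
                       (3u << _Q_OWNER_CPU_OFFSET) | 1u;

        printf("owner=%d tail=%d\n", get_owner_cpu(val), decode_tail_cpu(val));
        return 0;
}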
145 : "r" (&lock->val), "r"(tail), "r" (newval), in trylock_clean_tail()
175 : "r" (&lock->val), "r" (tail), "r"(_Q_TAIL_CPU_MASK) in publish_tail_cpu()
191 : "r" (&lock->val), "r" (_Q_MUST_Q_VAL) in set_mustq()
207 : "r" (&lock->val), "r" (_Q_MUST_Q_VAL) in clear_mustq()
229 : "r" (&lock->val), "r"(old), "r" (new) in try_set_sleepy()
235 static __always_inline void seen_sleepy_owner(struct qspinlock *lock, u32 val) in seen_sleepy_owner() argument
240 if (!(val & _Q_SLEEPY_VAL)) in seen_sleepy_owner()
241 try_set_sleepy(lock, val); in seen_sleepy_owner()
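
publish_tail_cpu(), set_mustq()/clear_mustq() and try_set_sleepy() above are load-reserve/store-conditional read-modify-write updates of individual fields in the lock word, and seen_sleepy_owner() only attempts the update when the sleepy bit is not already set. A hedged C11-atomics sketch of the same idea follows, with assumed mask values and without the kernel's memory barriers or sleepy-timestamp bookkeeping:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define _Q_SLEEPY_VAL        (1u << 15)                       /* assumed bit */
#define _Q_TAIL_CPU_OFFSET   16                               /* assumed layout */
#define _Q_TAIL_CPU_MASK     (0xffffu << _Q_TAIL_CPU_OFFSET)

/* Replace the tail-CPU field (tail arrives pre-shifted/encoded), keeping
 * every other bit; return the previous word so the caller can find the
 * old queue tail. */
static uint32_t publish_tail_cpu(_Atomic uint32_t *lock, uint32_t tail)
{
        uint32_t old = atomic_load(lock);

        while (!atomic_compare_exchange_weak(lock, &old,
                                             (old & ~_Q_TAIL_CPU_MASK) | tail))
                ;       /* a failed CAS reloads 'old'; retry */
        return old;
}

/* One CAS against the value we last observed; if the word changed
 * underneath us, simply give up. */
static bool try_set_sleepy(_Atomic uint32_t *lock, uint32_t old)
{
        return atomic_compare_exchange_strong(lock, &old, old | _Q_SLEEPY_VAL);
}

static void seen_sleepy_owner(_Atomic uint32_t *lock, uint32_t val)
{
        if (!(val & _Q_SLEEPY_VAL))     /* skip the atomic when already set */
                try_set_sleepy(lock, val);
}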
284 static __always_inline bool __yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt,… in __yield_to_locked_owner() argument
290 BUG_ON(!(val & _Q_LOCKED_VAL)); in __yield_to_locked_owner()
298 owner = get_owner_cpu(val); in __yield_to_locked_owner()
306 seen_sleepy_owner(lock, val); in __yield_to_locked_owner()
318 if (READ_ONCE(lock->val) == val) { in __yield_to_locked_owner()
337 static __always_inline bool yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt) in yield_to_locked_owner() argument
339 return __yield_to_locked_owner(lock, val, paravirt, false); in yield_to_locked_owner()
343 static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u32 val, bool paravi… in yield_head_to_locked_owner() argument
347 if ((val & _Q_MUST_Q_VAL) && pv_yield_allow_steal) in yield_head_to_locked_owner()
350 return __yield_to_locked_owner(lock, val, paravirt, mustq); in yield_head_to_locked_owner()
353 static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool paravirt) in propagate_sleepy() argument
370 owner = get_owner_cpu(val); in propagate_sleepy()
392 u32 val = READ_ONCE(lock->val); in yield_to_prev() local
394 if (val & _Q_LOCKED_VAL) { in yield_to_prev()
403 if (vcpu_is_preempted(get_owner_cpu(val))) in yield_to_prev()
407 preempted = yield_to_locked_owner(lock, val, paravirt); in yield_to_prev()
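
The __yield_to_locked_owner() and yield_to_prev() references above show the core paravirt pattern: take a snapshot `val`, decode the owner CPU from it, then re-check that lock->val still equals the snapshot before directing a yield at that owner, so a stale snapshot never targets the wrong CPU (propagate_sleepy() reuses the same owner decode). A rough standalone sketch of that control flow, with sched_yield() standing in for the hypervisor's yield-to-preempted call and assumed mask values:

#include <assert.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define _Q_LOCKED_VAL        1u
#define _Q_OWNER_CPU_OFFSET  1
#define _Q_OWNER_CPU_MASK    (((1u << 14) - 1) << _Q_OWNER_CPU_OFFSET)  /* assumed */

static int get_owner_cpu(uint32_t val)
{
        return (val & _Q_OWNER_CPU_MASK) >> _Q_OWNER_CPU_OFFSET;
}

/* Returns true if we yielded; the caller treats that as "owner preempted". */
static bool yield_to_locked_owner(_Atomic uint32_t *lock, uint32_t val)
{
        int owner;

        assert(val & _Q_LOCKED_VAL);    /* mirrors the kernel's BUG_ON() */

        owner = get_owner_cpu(val);
        (void)owner;                    /* the kernel directs the yield at this CPU */

        /*
         * Only yield if the lock word still matches the snapshot the owner
         * was decoded from; otherwise the hint could already be stale.
         */
        if (atomic_load_explicit(lock, memory_order_relaxed) == val) {
                sched_yield();          /* stand-in for the real yield-to-owner */
                return true;
        }

        return false;
}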
442 static __always_inline bool steal_break(u32 val, int iters, bool paravirt, bool sleepy) in steal_break() argument
449 int cpu = get_owner_cpu(val); in steal_break()
461 u32 val; in try_to_steal_lock() local
473 val = READ_ONCE(lock->val); in try_to_steal_lock()
474 if (val & _Q_MUST_Q_VAL) in try_to_steal_lock()
478 if (unlikely(!(val & _Q_LOCKED_VAL))) { in try_to_steal_lock()
484 preempted = yield_to_locked_owner(lock, val, paravirt); in try_to_steal_lock()
489 if (val & _Q_SLEEPY_VAL) { in try_to_steal_lock()
497 !(val & _Q_SLEEPY_VAL)) { in try_to_steal_lock()
498 if (try_set_sleepy(lock, val)) in try_to_steal_lock()
499 val |= _Q_SLEEPY_VAL; in try_to_steal_lock()
519 } while (!steal_break(val, iters, paravirt, sleepy)); in try_to_steal_lock()
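
try_to_steal_lock() above spins for a bounded number of passes trying to take the lock out of order: each pass re-reads the lock word, bails out as soon as the must-queue bit is set, attempts a trylock when the lock looks free, and otherwise yields toward the owner until steal_break() says to stop. A simplified, hedged sketch of that loop shape; steal_spins and the bare CAS trylock are illustrative stand-ins, and the sleepy-lock scaling and paravirt yielding are omitted:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define _Q_LOCKED_VAL  1u
#define _Q_MUST_Q_VAL  (1u << 31)       /* assumed bit position */

static int steal_spins = 256;           /* illustrative bound, tunable in the kernel */

/* Try to take the lock without queueing, bounded by steal_spins passes. */
static bool try_to_steal_lock(_Atomic uint32_t *lock)
{
        int iters = 0;

        do {
                uint32_t val = atomic_load(lock);

                /* A queued waiter has asked everyone to stop stealing. */
                if (val & _Q_MUST_Q_VAL)
                        break;

                if (!(val & _Q_LOCKED_VAL)) {
                        /* Lock looks free: one CAS to grab it (simplified
                         * trylock; the kernel also records the owner CPU). */
                        if (atomic_compare_exchange_strong(lock, &val,
                                                           val | _Q_LOCKED_VAL))
                                return true;
                }

                /* Held, or we lost the trylock race: the kernel may yield
                 * to the owner here before the next pass. */
                iters++;
        } while (iters < steal_spins);

        return false;
}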
530 u32 val, old, tail; in queued_spin_lock_mcs_queue() local
612 val = READ_ONCE(lock->val); in queued_spin_lock_mcs_queue()
613 if (!(val & _Q_LOCKED_VAL)) in queued_spin_lock_mcs_queue()
619 if (val & _Q_SLEEPY_VAL) { in queued_spin_lock_mcs_queue()
627 !(val & _Q_SLEEPY_VAL)) { in queued_spin_lock_mcs_queue()
628 if (try_set_sleepy(lock, val)) in queued_spin_lock_mcs_queue()
629 val |= _Q_SLEEPY_VAL; in queued_spin_lock_mcs_queue()
633 propagate_sleepy(node, val, paravirt); in queued_spin_lock_mcs_queue()
634 preempted = yield_head_to_locked_owner(lock, val, paravirt); in queued_spin_lock_mcs_queue()
653 val |= _Q_MUST_Q_VAL; in queued_spin_lock_mcs_queue()
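
At the head of the MCS queue, the lines above re-read lock->val, attempt the acquisition when the locked bit clears, and after spinning too long set the must-queue bit so lock stealers stop jumping the queue. A condensed sketch of that head-of-queue loop; head_spins and set_mustq() here are simplified stand-ins, and the sleepy/paravirt propagation is left out:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define _Q_LOCKED_VAL  1u
#define _Q_MUST_Q_VAL  (1u << 31)       /* assumed bit position */

static int head_spins = 256;            /* illustrative bound, tunable in the kernel */

static void set_mustq(_Atomic uint32_t *lock)
{
        atomic_fetch_or(lock, _Q_MUST_Q_VAL);
}

/* Spin as the head of the MCS queue until the lock is taken. */
static void head_of_queue_spin(_Atomic uint32_t *lock)
{
        int iters = 0;
        bool mustq = false;

        for (;;) {
                uint32_t val = atomic_load(lock);

                if (!(val & _Q_LOCKED_VAL)) {
                        /* Locked bit clear: try to take the lock (simplified;
                         * the kernel also clears its own tail encoding). */
                        if (atomic_compare_exchange_strong(lock, &val,
                                                           val | _Q_LOCKED_VAL))
                                return;
                        continue;
                }

                /* Still held: the kernel may yield to the owner and
                 * propagate the sleepy hint down the queue here. */
                if (++iters > head_spins && !mustq) {
                        set_mustq(lock);        /* tell stealers to back off */
                        mustq = true;
                }
        }
}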
741 static int steal_spins_set(void *data, u64 val) in steal_spins_set() argument
745 steal_spins = val; in steal_spins_set()
757 if (val && !steal_spins) { in steal_spins_set()
761 steal_spins = val; in steal_spins_set()
762 } else if (!val && steal_spins) { in steal_spins_set()
763 steal_spins = val; in steal_spins_set()
768 steal_spins = val; in steal_spins_set()
776 static int steal_spins_get(void *data, u64 *val) in steal_spins_get() argument
778 *val = steal_spins; in steal_spins_get()
785 static int remote_steal_spins_set(void *data, u64 val) in remote_steal_spins_set() argument
787 remote_steal_spins = val; in remote_steal_spins_set()
792 static int remote_steal_spins_get(void *data, u64 *val) in remote_steal_spins_get() argument
794 *val = remote_steal_spins; in remote_steal_spins_get()
801 static int head_spins_set(void *data, u64 val) in head_spins_set() argument
803 head_spins = val; in head_spins_set()
808 static int head_spins_get(void *data, u64 *val) in head_spins_get() argument
810 *val = head_spins; in head_spins_get()
817 static int pv_yield_owner_set(void *data, u64 val) in pv_yield_owner_set() argument
819 pv_yield_owner = !!val; in pv_yield_owner_set()
824 static int pv_yield_owner_get(void *data, u64 *val) in pv_yield_owner_get() argument
826 *val = pv_yield_owner; in pv_yield_owner_get()
833 static int pv_yield_allow_steal_set(void *data, u64 val) in pv_yield_allow_steal_set() argument
835 pv_yield_allow_steal = !!val; in pv_yield_allow_steal_set()
840 static int pv_yield_allow_steal_get(void *data, u64 *val) in pv_yield_allow_steal_get() argument
842 *val = pv_yield_allow_steal; in pv_yield_allow_steal_get()
849 static int pv_spin_on_preempted_owner_set(void *data, u64 val) in pv_spin_on_preempted_owner_set() argument
851 pv_spin_on_preempted_owner = !!val; in pv_spin_on_preempted_owner_set()
856 static int pv_spin_on_preempted_owner_get(void *data, u64 *val) in pv_spin_on_preempted_owner_get() argument
858 *val = pv_spin_on_preempted_owner; in pv_spin_on_preempted_owner_get()
865 static int pv_sleepy_lock_set(void *data, u64 val) in pv_sleepy_lock_set() argument
867 pv_sleepy_lock = !!val; in pv_sleepy_lock_set()
872 static int pv_sleepy_lock_get(void *data, u64 *val) in pv_sleepy_lock_get() argument
874 *val = pv_sleepy_lock; in pv_sleepy_lock_get()
881 static int pv_sleepy_lock_sticky_set(void *data, u64 val) in pv_sleepy_lock_sticky_set() argument
883 pv_sleepy_lock_sticky = !!val; in pv_sleepy_lock_sticky_set()
888 static int pv_sleepy_lock_sticky_get(void *data, u64 *val) in pv_sleepy_lock_sticky_get() argument
890 *val = pv_sleepy_lock_sticky; in pv_sleepy_lock_sticky_get()
897 static int pv_sleepy_lock_interval_ns_set(void *data, u64 val) in pv_sleepy_lock_interval_ns_set() argument
899 pv_sleepy_lock_interval_ns = val; in pv_sleepy_lock_interval_ns_set()
904 static int pv_sleepy_lock_interval_ns_get(void *data, u64 *val) in pv_sleepy_lock_interval_ns_get() argument
906 *val = pv_sleepy_lock_interval_ns; in pv_sleepy_lock_interval_ns_get()
913 static int pv_sleepy_lock_factor_set(void *data, u64 val) in pv_sleepy_lock_factor_set() argument
915 pv_sleepy_lock_factor = val; in pv_sleepy_lock_factor_set()
920 static int pv_sleepy_lock_factor_get(void *data, u64 *val) in pv_sleepy_lock_factor_get() argument
922 *val = pv_sleepy_lock_factor; in pv_sleepy_lock_factor_get()
929 static int pv_yield_prev_set(void *data, u64 val) in pv_yield_prev_set() argument
931 pv_yield_prev = !!val; in pv_yield_prev_set()
936 static int pv_yield_prev_get(void *data, u64 *val) in pv_yield_prev_get() argument
938 *val = pv_yield_prev; in pv_yield_prev_get()
945 static int pv_yield_sleepy_owner_set(void *data, u64 val) in pv_yield_sleepy_owner_set() argument
947 pv_yield_sleepy_owner = !!val; in pv_yield_sleepy_owner_set()
952 static int pv_yield_sleepy_owner_get(void *data, u64 *val) in pv_yield_sleepy_owner_get() argument
954 *val = pv_yield_sleepy_owner; in pv_yield_sleepy_owner_get()
961 static int pv_prod_head_set(void *data, u64 val) in pv_prod_head_set() argument
963 pv_prod_head = !!val; in pv_prod_head_set()
968 static int pv_prod_head_get(void *data, u64 *val) in pv_prod_head_get() argument
970 *val = pv_prod_head; in pv_prod_head_get()
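
Everything from steal_spins_set() downward is the same debugfs tunable boilerplate: a _set() handler stores the written u64 (booleanised with !! for the on/off knobs) and a _get() handler reports the current value, with each pair wired up as a simple attribute file. A hedged sketch of that pattern in kernel style; the attribute name, file name, and init hook below are illustrative, not the ones the source registers:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/init.h>

static bool pv_yield_owner = true;

/* Writing any nonzero value enables the behaviour; zero disables it. */
static int pv_yield_owner_set(void *data, u64 val)
{
        pv_yield_owner = !!val;
        return 0;
}

static int pv_yield_owner_get(void *data, u64 *val)
{
        *val = pv_yield_owner;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_owner, pv_yield_owner_get,
                        pv_yield_owner_set, "%llu\n");

static __init int example_qspinlock_debugfs_init(void)
{
        /* Illustrative name and parent (NULL = debugfs root); the kernel
         * groups its spinlock knobs under the arch debugfs directory. */
        debugfs_create_file("qspl_pv_yield_owner", 0600, NULL, NULL,
                            &fops_pv_yield_owner);
        return 0;
}
device_initcall(example_qspinlock_debugfs_init);

Once registered, reading or writing the file from userspace toggles the behaviour at runtime; the u64-valued knobs (steal_spins, head_spins, the sleepy-lock interval and factor) follow the same pattern without the !! conversion.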