Lines Matching refs:paravirt

77 static __always_inline int get_steal_spins(bool paravirt, bool sleepy)  in get_steal_spins()  argument
79 if (paravirt && sleepy) in get_steal_spins()
85 static __always_inline int get_remote_steal_spins(bool paravirt, bool sleepy) in get_remote_steal_spins() argument
87 if (paravirt && sleepy) in get_remote_steal_spins()
93 static __always_inline int get_head_spins(bool paravirt, bool sleepy) in get_head_spins() argument
95 if (paravirt && sleepy) in get_head_spins()
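
The three get_*_spins() helpers above share one shape: a base spin budget that is inflated when running paravirtualized on a lock marked sleepy (i.e. its owner vCPU has been seen preempted). A minimal sketch of that shape, assuming tunables named steal_spins and pv_sleepy_lock_factor; names and defaults are assumptions modeled on the calls shown, not a verified copy of the file:

    #include <stdbool.h>

    /* Assumed tunables: base budget and the sleepy inflation factor. */
    static int steal_spins = 1 << 5;
    static int pv_sleepy_lock_factor = 256;

    /* Spin budget for trying to steal the lock: inflate it when we are
     * paravirtualized and the lock is sleepy (owner vCPU seen preempted),
     * since a short budget would only burn cycles in that case. */
    static inline int get_steal_spins_sketch(bool paravirt, bool sleepy)
    {
        if (paravirt && sleepy)
            return steal_spins * pv_sleepy_lock_factor;
        return steal_spins;
    }

get_remote_steal_spins() and get_head_spins() apply the same paravirt && sleepy test over their own budgets: one for owners on a remote node, one for the waiter at the head of the queue.
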
285 static __always_inline bool __yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt, bool mustq) in __yield_to_locked_owner() argument
293 if (!paravirt) in __yield_to_locked_owner()
338 static __always_inline bool yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt) in yield_to_locked_owner() argument
340 return __yield_to_locked_owner(lock, val, paravirt, false); in yield_to_locked_owner()
344 static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt) in yield_head_to_locked_owner() argument
351 return __yield_to_locked_owner(lock, val, paravirt, mustq); in yield_head_to_locked_owner()
354 static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool paravirt) in propagate_sleepy() argument
359 if (!paravirt) in propagate_sleepy()
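
Lines 285-359 are the conferral helpers: with !paravirt they reduce to plain busy-wait relaxation, and under paravirt a waiter donates its timeslice to the lock owner's vCPU when that vCPU looks preempted. A self-contained sketch of the mechanism, assuming powerpc's convention that a vCPU's yield count is even while it is running, with stand-in stubs for the yield_count_of()/yield_to_preempted() hypervisor helpers:

    #include <stdbool.h>
    #include <stdint.h>

    /* Stubs standing in for the hypervisor interfaces; on powerpc these
     * are the yield_count_of()/yield_to_preempted() paravirt helpers. */
    static uint32_t yield_count_of(int cpu) { (void)cpu; return 0; }
    static void yield_to_preempted(int cpu, uint32_t yc) { (void)cpu; (void)yc; }
    static void cpu_relax(void) { }

    /* Returns true if the owner looked preempted and we conferred to it. */
    static bool yield_to_owner_sketch(int owner_cpu, bool paravirt)
    {
        uint32_t yield_count;

        if (!paravirt) {
            cpu_relax();            /* bare metal: plain busy-wait */
            return false;
        }

        yield_count = yield_count_of(owner_cpu);
        if ((yield_count & 1) == 0) {
            cpu_relax();            /* even count: owner vCPU is running */
            return false;
        }

        /* Odd count: owner vCPU is preempted; donate our timeslice. */
        yield_to_preempted(owner_cpu, yield_count);
        return true;
    }

yield_head_to_locked_owner() additionally threads mustq through so the queue head can demand fair queueing from would-be stealers, and propagate_sleepy() records the preempted-owner observation in the queue so later waiters pick the sleepy budgets; both are paravirt-only, as the !paravirt early returns at lines 293 and 359 show.
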
377 static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, int prev_cpu, bool paravirt) in yield_to_prev() argument
382 if (!paravirt) in yield_to_prev()
408 preempted = yield_to_locked_owner(lock, val, paravirt); in yield_to_prev()
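
yield_to_prev() applies the same conferral while the CPU is still queued: a waiter spins on its own MCS node, so the vCPU it needs to keep running is the previous queue member (prev_cpu), and line 408 shows it also re-checking the lock owner via yield_to_locked_owner(). A sketch of the prev_cpu side, reusing the stubs from the sketch above; struct qnode_sketch and the re-check placement are assumptions:

    /* While queued, the CPU to keep running is the previous queue member,
     * who must eventually pass the MCS lock on to us. */
    struct qnode_sketch { int locked; };

    static bool yield_to_prev_sketch(struct qnode_sketch *node, int prev_cpu,
                                     bool paravirt)
    {
        uint32_t yield_count;

        if (!paravirt)
            return false;

        yield_count = yield_count_of(prev_cpu);
        if ((yield_count & 1) == 0)
            return false;           /* prev vCPU is running: just spin */

        /* Re-check after sampling: if prev already handed over the MCS
         * lock, there is nothing left to confer for. */
        if (__atomic_load_n(&node->locked, __ATOMIC_ACQUIRE))
            return false;

        yield_to_preempted(prev_cpu, yield_count);
        return true;
    }
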
443 static __always_inline bool steal_break(u32 val, int iters, bool paravirt, bool sleepy) in steal_break() argument
445 if (iters >= get_steal_spins(paravirt, sleepy)) in steal_break()
449 (iters >= get_remote_steal_spins(paravirt, sleepy))) { in steal_break()
457 static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool paravirt) in try_to_steal_lock() argument
485 preempted = yield_to_locked_owner(lock, val, paravirt); in try_to_steal_lock()
488 if (paravirt && pv_sleepy_lock) { in try_to_steal_lock()
520 } while (!steal_break(val, iters, paravirt, sleepy)); in try_to_steal_lock()
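
try_to_steal_lock() and steal_break() bound how long a newly arriving CPU may try to grab the lock out of turn before it falls back to the fair MCS queue. A compressed sketch of that loop, using a GCC atomic builtin for the trylock and the budget and conferral helpers from the earlier sketches; the lock-word layout is omitted, so owner extraction is stubbed:

    #include <stdbool.h>
    #include <stdint.h>

    #define LOCKED_BIT 0x1u

    struct qspinlock_sketch { uint32_t val; };

    /* Owner CPU is encoded in the lock word; exact layout omitted here. */
    static int owner_of_sketch(uint32_t val) { (void)val; return 0; }

    static bool trylock_sketch(struct qspinlock_sketch *lock)
    {
        uint32_t old = 0;
        return __atomic_compare_exchange_n(&lock->val, &old, LOCKED_BIT,
                                           false, __ATOMIC_ACQUIRE,
                                           __ATOMIC_RELAXED);
    }

    /* Bounded stealing: keep trying to take the lock out of turn until
     * the budget from get_steal_spins_sketch() runs out. */
    static bool try_to_steal_lock_sketch(struct qspinlock_sketch *lock,
                                         bool paravirt)
    {
        bool sleepy = false;
        int iters = 0;

        do {
            uint32_t val = __atomic_load_n(&lock->val, __ATOMIC_RELAXED);

            if (!(val & LOCKED_BIT)) {
                if (trylock_sketch(lock))
                    return true;    /* stole the lock */
                continue;
            }

            /* Held: maybe confer to a preempted owner; seeing one makes
             * the lock "sleepy", switching to the bigger budgets. */
            if (yield_to_owner_sketch(owner_of_sketch(val), paravirt))
                sleepy = true;
            iters++;
        } while (iters < get_steal_spins_sketch(paravirt, sleepy));

        return false;               /* budget exhausted: join the queue */
    }

steal_break() (lines 443-449) is the loop exit compressed above; the listing shows it also consulting the tighter remote budget, apparently for owners on another node.
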
527 static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt) in queued_spin_lock_mcs_queue() argument
587 if (yield_to_prev(lock, node, prev_cpu, paravirt)) in queued_spin_lock_mcs_queue()
618 if (paravirt && pv_sleepy_lock && maybe_stealers) { in queued_spin_lock_mcs_queue()
634 propagate_sleepy(node, val, paravirt); in queued_spin_lock_mcs_queue()
635 preempted = yield_head_to_locked_owner(lock, val, paravirt); in queued_spin_lock_mcs_queue()
642 if (paravirt && preempted) { in queued_spin_lock_mcs_queue()
651 if (!mustq && iters >= get_head_spins(paravirt, sleepy)) { in queued_spin_lock_mcs_queue()
687 if (paravirt && pv_prod_head) { in queued_spin_lock_mcs_queue()
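
Finally, the reason every function above threads bool paravirt through as an argument rather than testing a global: each helper is __always_inline, and the slow path is instantiated once with paravirt=true and once with paravirt=false, so constant propagation deletes every paravirt branch from both copies. A sketch of that dispatch under the same assumptions, with is_shared_processor() standing for the runtime "shared-processor guest" test and the surrounding shape paraphrased, not quoted:

    #include <stdbool.h>

    struct qspinlock_sketch;   /* as in the earlier sketch */

    /* Mirrors of the listing's functions, assumed provided elsewhere. */
    extern bool is_shared_processor(void);
    extern bool try_to_steal_lock_sketch(struct qspinlock_sketch *lock,
                                         bool paravirt);
    extern void queued_spin_lock_mcs_queue_sketch(struct qspinlock_sketch *lock,
                                                  bool paravirt);

    void queued_spin_lock_slowpath_sketch(struct qspinlock_sketch *lock)
    {
        if (is_shared_processor()) {
            /* paravirt=true: all pv_* branches compiled in */
            if (try_to_steal_lock_sketch(lock, true))
                return;
            queued_spin_lock_mcs_queue_sketch(lock, true);
        } else {
            /* paravirt=false: every paravirt test folds away */
            if (try_to_steal_lock_sketch(lock, false))
                return;
            queued_spin_lock_mcs_queue_sketch(lock, false);
        }
    }

The paravirt && pv_prod_head test at line 687 is the handoff end of the same scheme: after passing the lock on, the old holder can, apparently as a tunable option, prod (wake) the new queue head's vCPU; with paravirt false the whole branch vanishes at compile time.
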