Lines matching full:owner in kernel/locking/mutex.c

49 atomic_long_set(&lock->owner, 0); in __mutex_init()
60 static inline struct task_struct *__owner_task(unsigned long owner) in __owner_task() argument
62 return (struct task_struct *)(owner & ~MUTEX_FLAGS); in __owner_task()
71 static inline unsigned long __owner_flags(unsigned long owner) in __owner_flags() argument
73 return owner & MUTEX_FLAGS; in __owner_flags()
79 unsigned long owner = atomic_long_read(&lock->owner); in mutex_get_owner() local
81 return (unsigned long)__owner_task(owner); in mutex_get_owner()
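The lines above show the core trick: lock->owner is a single atomic long that packs the owning task_struct pointer together with three low flag bits, which is why __owner_task() and __owner_flags() are simple mask operations. Below is a hedged userspace sketch of that encoding; the names mutex_model, owner_task, owner_flags, and struct task are illustrative stand-ins, not kernel code, though the flag values match mutex.c.

#include <stdatomic.h>

#define MUTEX_FLAG_WAITERS  0x01UL
#define MUTEX_FLAG_HANDOFF  0x02UL
#define MUTEX_FLAG_PICKUP   0x04UL
#define MUTEX_FLAGS         0x07UL

struct task;                        /* stand-in for task_struct */

struct mutex_model {
    atomic_ulong owner;             /* task pointer | flag bits */
};

/* Task pointers are word-aligned, so the low 3 bits are free for flags. */
static inline struct task *owner_task(unsigned long owner)
{
    return (struct task *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long owner_flags(unsigned long owner)
{
    return owner & MUTEX_FLAGS;
}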
89 unsigned long owner, curr = (unsigned long)current; in __mutex_trylock_common() local
91 owner = atomic_long_read(&lock->owner); in __mutex_trylock_common()
93 unsigned long flags = __owner_flags(owner); in __mutex_trylock_common()
94 unsigned long task = owner & ~MUTEX_FLAGS; in __mutex_trylock_common()
113 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) { in __mutex_trylock_common()
120 return __owner_task(owner); in __mutex_trylock_common()
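__mutex_trylock_common() is a classic compare-and-swap loop over that packed word: split it into task and flag bits, and if no task owns the lock, install the current task while preserving the flags. A hedged sketch in the same userspace model (trylock_common and curr are illustrative names; the real function also handles the HANDOFF/PICKUP hand-over path, omitted here):

/* Returns NULL on success (mirroring the kernel), else the owner task. */
static struct task *trylock_common(struct mutex_model *lock, struct task *curr)
{
    unsigned long owner = atomic_load_explicit(&lock->owner,
                                               memory_order_relaxed);
    for (;;) {
        unsigned long flags = owner_flags(owner);
        unsigned long task  = owner & ~MUTEX_FLAGS;

        if (task)                       /* somebody owns it */
            return owner_task(owner);

        task = (unsigned long)curr;     /* unowned: claim it */
        if (atomic_compare_exchange_weak_explicit(&lock->owner,
                &owner, task | flags,
                memory_order_acquire, memory_order_relaxed))
            return NULL;
        /* CMPXCHG failure reloads 'owner'; loop and retry. */
    }
}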
157 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) in __mutex_trylock_fast()
167 return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL); in __mutex_unlock_fast()
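The two fast paths are single CMPXCHGs on the whole word: lock is 0 -> curr with acquire ordering, unlock is curr -> 0 with release ordering, and either failure (flag bits set, or a different owner) diverts to a slowpath. Hedged sketch, continuing the model above (trylock_fast and unlock_fast are illustrative names):

#include <stdbool.h>

static bool trylock_fast(struct mutex_model *lock, struct task *curr)
{
    unsigned long zero = 0UL;

    return atomic_compare_exchange_strong_explicit(&lock->owner,
            &zero, (unsigned long)curr,
            memory_order_acquire, memory_order_relaxed);
}

static bool unlock_fast(struct mutex_model *lock, struct task *curr)
{
    unsigned long expected = (unsigned long)curr;

    return atomic_compare_exchange_strong_explicit(&lock->owner,
            &expected, 0UL,
            memory_order_release, memory_order_relaxed);
}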
173 atomic_long_or(flag, &lock->owner); in __mutex_set_flag()
178 atomic_long_andnot(flag, &lock->owner); in __mutex_clear_flag()
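Because the flag bits never overlap the pointer bits, flags can be set and cleared with plain atomic OR / AND-NOT, with no CMPXCHG loop. Continuing the sketch (set_flag and clear_flag are illustrative names):

static void set_flag(struct mutex_model *lock, unsigned long flag)
{
    atomic_fetch_or_explicit(&lock->owner, flag, memory_order_relaxed);
}

static void clear_flag(struct mutex_model *lock, unsigned long flag)
{
    atomic_fetch_and_explicit(&lock->owner, ~flag, memory_order_relaxed);
}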
221 unsigned long owner = atomic_long_read(&lock->owner); in __mutex_handoff() local
226 MUTEX_WARN_ON(__owner_task(owner) != current); in __mutex_handoff()
227 MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP); in __mutex_handoff()
229 new = (owner & MUTEX_FLAG_WAITERS); in __mutex_handoff()
234 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new)) in __mutex_handoff()
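__mutex_handoff() gives the lock directly to the top waiter instead of releasing it: the new word keeps only the WAITERS bit, installs the waiter as owner, and sets PICKUP so the waiter knows the lock was handed to it. A hedged one-shot sketch; the kernel version retries in a loop and only sets PICKUP for a non-NULL task:

static bool handoff(struct mutex_model *lock, struct task *top_waiter)
{
    unsigned long owner = atomic_load_explicit(&lock->owner,
                                               memory_order_relaxed);
    unsigned long new;

    new  = owner & MUTEX_FLAG_WAITERS;  /* keep only WAITERS */
    new |= (unsigned long)top_waiter;   /* install the new owner */
    new |= MUTEX_FLAG_PICKUP;           /* flag the handoff */

    return atomic_compare_exchange_strong_explicit(&lock->owner,
            &owner, new,
            memory_order_release, memory_order_relaxed);
}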
320 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS)) in ww_mutex_spin_on_owner()
334 * Look out! "owner" is an entirely speculative pointer access and not
340 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, in mutex_spin_on_owner() argument
347 while (__mutex_owner(lock) == owner) { in mutex_spin_on_owner()
349 * Ensure we emit the owner->on_cpu, dereference _after_ in mutex_spin_on_owner()
350 * checking lock->owner still matches owner. And we already in mutex_spin_on_owner()
361 if (!owner_on_cpu(owner) || need_resched()) { in mutex_spin_on_owner()
382 struct task_struct *owner; in mutex_can_spin_on_owner() local
395 owner = __mutex_owner(lock); in mutex_can_spin_on_owner()
396 if (owner) in mutex_can_spin_on_owner()
397 retval = owner_on_cpu(owner); in mutex_can_spin_on_owner()
400 * If lock->owner is not set, the mutex has been released. Return true in mutex_can_spin_on_owner()
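mutex_spin_on_owner() keeps spinning only while three things hold: the same task still owns the lock, that task is running on a CPU, and the spinner itself doesn't need to reschedule. A hedged model of the loop, continuing the sketch; on_cpu is a plain atomic flag standing in for owner_on_cpu(), and need_resched is passed as a callback, whereas the kernel also relies on RCU and barrier() to make the speculative owner dereference safe:

struct task {
    _Atomic int on_cpu;             /* completes the earlier stand-in */
};

static bool spin_on_owner(struct mutex_model *lock, struct task *owner,
                          bool (*need_resched)(void))
{
    while (owner_task(atomic_load_explicit(&lock->owner,
                                memory_order_relaxed)) == owner) {
        if (!atomic_load(&owner->on_cpu) || need_resched())
            return false;           /* stop spinning, go to sleep */
        /* kernel: cpu_relax() here */
    }
    return true;                    /* owner changed; worth retrying */
}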
410 * We try to spin for acquisition when we find that the lock owner
412 * need to reschedule. The rationale is that if the lock owner is
425 * with the spinner at the head of the OSQ, if present, until the owner is
446 * MCS (queued) lock first before spinning on the owner field. in mutex_optimistic_spin()
453 struct task_struct *owner; in mutex_optimistic_spin() local
456 owner = __mutex_trylock_or_owner(lock); in mutex_optimistic_spin()
457 if (!owner) in mutex_optimistic_spin()
461 * There's an owner, wait for it to either in mutex_optimistic_spin()
464 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter)) in mutex_optimistic_spin()
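Putting the pieces together, mutex_optimistic_spin() alternates between trying the lock and spinning on whoever owns it, until it either acquires the lock or decides to sleep. A hedged skeleton reusing the sketches above; the OSQ queueing, ww_ctx handling, and the waiter flag of the real function are all omitted:

static bool optimistic_spin(struct mutex_model *lock, struct task *curr,
                            bool (*need_resched)(void))
{
    for (;;) {
        struct task *owner = trylock_common(lock, curr);

        if (!owner)
            return true;            /* acquired */

        /* Owned: wait for release, or for the owner to sleep. */
        if (!spin_on_owner(lock, owner, need_resched))
            return false;           /* give up, take the slowpath */
    }
}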
586 * race and wound us here since they can't have a valid owner in __mutex_lock_common()
792 * race and wound us here, since they can't have a valid owner in ww_mutex_trylock()
919 unsigned long owner; in __mutex_unlock_slowpath() local
928 * Except when HANDOFF, in that case we must not clear the owner field, in __mutex_unlock_slowpath()
931 owner = atomic_long_read(&lock->owner); in __mutex_unlock_slowpath()
933 MUTEX_WARN_ON(__owner_task(owner) != current); in __mutex_unlock_slowpath()
934 MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP); in __mutex_unlock_slowpath()
936 if (owner & MUTEX_FLAG_HANDOFF) in __mutex_unlock_slowpath()
939 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) { in __mutex_unlock_slowpath()
940 if (owner & MUTEX_FLAG_WAITERS) in __mutex_unlock_slowpath()
962 if (owner & MUTEX_FLAG_HANDOFF) in __mutex_unlock_slowpath()
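The unlock slowpath clears only the task bits and deliberately keeps the flags, so waiter state survives the release; when HANDOFF is set it must not clear the owner at all and instead passes the lock straight to the top waiter. A hedged sketch built on the helpers above (the real __mutex_unlock_slowpath() also takes the wait_lock and wakes the first waiter):

static void unlock_slowpath(struct mutex_model *lock, struct task *top_waiter)
{
    unsigned long owner = atomic_load_explicit(&lock->owner,
                                               memory_order_relaxed);
    for (;;) {
        if (owner & MUTEX_FLAG_HANDOFF) {
            handoff(lock, top_waiter);  /* pass the lock on */
            return;
        }
        /* Drop the task bits, keep the flag bits. */
        if (atomic_compare_exchange_weak_explicit(&lock->owner,
                &owner, owner_flags(owner),
                memory_order_release, memory_order_relaxed)) {
            if (owner & MUTEX_FLAG_WAITERS) {
                /* kernel: wake the first waiter here */
            }
            return;
        }
    }
}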