Lines Matching full:owner
37 * The least significant 2 bits of the owner value have the following
46 * into the owner field. It is cleared after an unlock.
49 * pointer into the owner field with the RWSEM_READER_OWNED bit set.
50 * On unlock, the owner field will largely be left untouched. So
51 * for a free or reader-owned rwsem, the owner value may contain
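The matched comments above describe how the owner word packs a task_struct pointer together with two low-order flag bits. The following standalone sketch models that encoding in user space; the concrete bit assignments (bit 0 for RWSEM_READER_OWNED, bit 1 for RWSEM_NONSPINNABLE) and the pack_owner()/unpack_owner() helpers are assumptions for illustration, consistent with the "least significant 2 bits" remark and the RWSEM_OWNER_FLAGS_MASK usage matched further down, not lines quoted from the source.

#include <stdio.h>

/* Assumed flag layout: the low two bits of the owner word. */
#define RWSEM_READER_OWNED	(1UL << 0)	/* set on reader acquisition */
#define RWSEM_NONSPINNABLE	(1UL << 1)	/* optimistic spinning disabled */
#define RWSEM_OWNER_FLAGS_MASK	(RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)

struct task { int pid; };	/* stand-in for task_struct */

/*
 * Pack a task pointer and flags into one word. This only works because
 * task pointers are aligned, leaving the low two bits free for flags.
 */
static unsigned long pack_owner(struct task *t, unsigned long flags)
{
	return (unsigned long)t | (flags & RWSEM_OWNER_FLAGS_MASK);
}

/* Recover the task pointer and the embedded flags from the packed word. */
static struct task *unpack_owner(unsigned long val, unsigned long *pflags)
{
	*pflags = val & RWSEM_OWNER_FLAGS_MASK;
	return (struct task *)(val & ~RWSEM_OWNER_FLAGS_MASK);
}

int main(void)
{
	struct task t = { .pid = 42 };
	unsigned long flags;
	unsigned long owner = pack_owner(&t, RWSEM_READER_OWNED);
	struct task *p = unpack_owner(owner, &flags);

	printf("pid=%d reader_owned=%lu\n", p->pid, flags & RWSEM_READER_OWNED);
	return 0;
}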
71 …WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, l…
74 atomic_long_read(&(sem)->owner), (long)current, \
132 * All writes to owner are protected by WRITE_ONCE() to make sure that
134 * the owner value concurrently without lock. Read from owner, however,
144 atomic_long_set(&sem->owner, (long)current); in rwsem_set_owner()
150 atomic_long_set(&sem->owner, 0); in rwsem_clear_owner()
154 * Test the flags in the owner field.
158 return atomic_long_read(&sem->owner) & flags; in rwsem_test_oflags()
163 * the owner field.
165 * Note that the owner value just indicates the task has owned the rwsem
166 * previously; it may not be the real owner or one of the real owners
172 struct task_struct *owner) in __rwsem_set_reader_owned() argument
174 unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED | in __rwsem_set_reader_owned()
175 (atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE); in __rwsem_set_reader_owned()
177 atomic_long_set(&sem->owner, val); in __rwsem_set_reader_owned()
187 * Return just the real task structure pointer of the owner
192 (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK); in rwsem_owner()
212 * it will make sure that the owner field of a reader-owned rwsem either
213 * points to a real reader-owner(s) or gets cleared. The only exception is
218 unsigned long val = atomic_long_read(&sem->owner); in rwsem_clear_reader_owned()
221 if (atomic_long_try_cmpxchg(&sem->owner, &val, in rwsem_clear_reader_owned()
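The two matched lines above come from a helper that clears a stale reader owner. A possible shape for that helper, assuming kernel context, that the clear is only attempted while the word still records the current task, and that the flag bits are kept (the new-value argument is not shown in the matches), is:

/* Sketch only: drop the task pointer while preserving the flag bits. */
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
	unsigned long val = atomic_long_read(&sem->owner);

	/* Retry only while the owner word still names the current task. */
	while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
		if (atomic_long_try_cmpxchg(&sem->owner, &val,
					    val & RWSEM_OWNER_FLAGS_MASK))
			return;
	}
}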
238 unsigned long owner = atomic_long_read(&sem->owner); in rwsem_set_nonspinnable() local
241 if (!(owner & RWSEM_READER_OWNED)) in rwsem_set_nonspinnable()
243 if (owner & RWSEM_NONSPINNABLE) in rwsem_set_nonspinnable()
245 } while (!atomic_long_try_cmpxchg(&sem->owner, &owner, in rwsem_set_nonspinnable()
246 owner | RWSEM_NONSPINNABLE)); in rwsem_set_nonspinnable()
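The matched lines above give the guard conditions and the closing try_cmpxchg of a retry loop; the do/break skeleton below shows how they plausibly fit together. The early-exit bodies are inferred from the guards, not quoted.

/* Sketch: mark a reader-owned rwsem as not worth spinning on. */
static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	do {
		if (!(owner & RWSEM_READER_OWNED))	/* not reader-owned */
			break;
		if (owner & RWSEM_NONSPINNABLE)		/* already marked */
			break;
	} while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
					  owner | RWSEM_NONSPINNABLE));
}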
277 * Return the real task structure pointer of the owner and the embedded
278 * flags in the owner. pflags must be non-NULL.
283 unsigned long owner = atomic_long_read(&sem->owner); in rwsem_owner_flags() local
285 *pflags = owner & RWSEM_OWNER_FLAGS_MASK; in rwsem_owner_flags()
286 return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK); in rwsem_owner_flags()
298 * (3) the owner field has RWSEM_READER_OWNED bit set.
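Read together with the rwsem_test_oflags() helper matched earlier, condition (3) can be checked as in the sketch below; the helper name is_rwsem_reader_owned() is an assumption, and the count-based checks in conditions (1) and (2) are omitted here.

/* Sketch: true if the owner word carries the reader-owned flag. */
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
	return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
}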
325 atomic_long_set(&sem->owner, 0L); in __init_rwsem()
454 struct task_struct *owner; in rwsem_mark_wake() local
481 owner = waiter->task; in rwsem_mark_wake()
482 __rwsem_set_reader_owned(sem, owner); in rwsem_mark_wake()
669 * depending on the lock owner state.
670 * OWNER_NULL : owner is currently NULL
671 * OWNER_WRITER: when owner changes and is a writer
672 * OWNER_READER: when owner changes and the new owner may be a reader.
675 * owner stops running, is unknown, or its timeslice has
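The three states listed here, plus the stop-spinning case described on the truncated line, suggest an enum along the following lines; the OWNER_NONSPINNABLE name and the one-bit-per-state values are assumptions, only the three quoted names come from the matches.

/* Sketch of the owner-state classification used by the spinning code. */
enum owner_state {
	OWNER_NULL		= 1 << 0,	/* no owner recorded */
	OWNER_WRITER		= 1 << 1,	/* owned by a writer */
	OWNER_READER		= 1 << 2,	/* may be owned by readers */
	OWNER_NONSPINNABLE	= 1 << 3,	/* stop optimistic spinning */
};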
706 struct task_struct *owner; in rwsem_can_spin_on_owner() local
719 owner = rwsem_owner_flags(sem, &flags); in rwsem_can_spin_on_owner()
721 * Don't check the read-owner as the entry may be stale. in rwsem_can_spin_on_owner()
724 (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner))) in rwsem_can_spin_on_owner()
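The condition matched above decides whether optimistic spinning is worth starting. A condensed sketch of that decision follows; the enclosing function body, the RCU protection around dereferencing the owner, the need_resched() pre-check, and the leading RWSEM_NONSPINNABLE test are assumptions about context not shown in the matches.

/*
 * Sketch: spinning looks futile if the lock is flagged nonspinnable or a
 * known write owner is not currently running on any CPU. Reader owners
 * are deliberately not checked, as the recorded entry may be stale.
 */
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	unsigned long flags;
	struct task_struct *owner = rwsem_owner_flags(sem, &flags);

	if ((flags & RWSEM_NONSPINNABLE) ||
	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
		return false;
	return true;
}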
732 rwsem_owner_state(struct task_struct *owner, unsigned long flags) in rwsem_owner_state() argument
740 return owner ? OWNER_WRITER : OWNER_NULL; in rwsem_owner_state()
746 struct task_struct *new, *owner; in rwsem_spin_on_owner() local
752 owner = rwsem_owner_flags(sem, &flags); in rwsem_spin_on_owner()
753 state = rwsem_owner_state(owner, flags); in rwsem_spin_on_owner()
760 * on the owner as well. Once that writer acquires the lock, in rwsem_spin_on_owner()
765 if ((new != owner) || (new_flags != flags)) { in rwsem_spin_on_owner()
771 * Ensure we emit the owner->on_cpu, dereference _after_ in rwsem_spin_on_owner()
772 * checking sem->owner still matches owner, if that fails, in rwsem_spin_on_owner()
773 * owner might point to free()d memory, if it still matches, in rwsem_spin_on_owner()
780 if (need_resched() || !owner_on_cpu(owner)) { in rwsem_spin_on_owner()
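The fragments from rwsem_spin_on_owner() above outline a re-check loop: sample the owner word, then keep spinning only while the same writer stays on a CPU. A condensed sketch of that loop is given below; the RCU protection, lock-event accounting, and exact return handling are left out and assumed to live in the surrounding kernel context.

/* Sketch of the owner re-check loop behind the matched lines. */
static enum owner_state rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *new, *owner;
	unsigned long flags, new_flags;
	enum owner_state state;

	owner = rwsem_owner_flags(sem, &flags);
	state = rwsem_owner_state(owner, flags);
	if (state != OWNER_WRITER)
		return state;

	for (;;) {
		/* A change of owner or flags ends this round of spinning. */
		new = rwsem_owner_flags(sem, &new_flags);
		if ((new != owner) || (new_flags != flags)) {
			state = rwsem_owner_state(new, new_flags);
			break;
		}

		/*
		 * Only look at owner->on_cpu after confirming the owner
		 * word is unchanged; otherwise the task may already be gone.
		 */
		barrier();

		if (need_resched() || !owner_on_cpu(owner)) {
			state = OWNER_NONSPINNABLE;
			break;
		}

		cpu_relax();
	}

	return state;
}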
828 * Optimistically spin on the owner field and attempt to acquire the in rwsem_optimistic_spin()
829 * lock whenever the owner changes. Spinning will be stopped when: in rwsem_optimistic_spin()
854 * the owner state changes from non-reader to reader. in rwsem_optimistic_spin()
885 * spinning while a NULL owner is detected may miss some in rwsem_optimistic_spin()
892 * 1) The lock owner is in the process of releasing the in rwsem_optimistic_spin()
893 * lock, sem->owner is cleared but the lock has not in rwsem_optimistic_spin()
895 * 2) The lock was free and owner cleared, but another in rwsem_optimistic_spin()
897 * we try to get it. The new owner may be a spinnable in rwsem_optimistic_spin()
904 * new owner is not a writer or spinnable, the RT task will in rwsem_optimistic_spin()
907 * If the owner is a writer, the need_resched() check is in rwsem_optimistic_spin()
908 * done inside rwsem_spin_on_owner(). If the owner is not in rwsem_optimistic_spin()
935 * Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should
941 atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner); in clear_nonspinnable()
966 * given wake_q if the rwsem lock owner isn't a writer. If rwsem is likely
1005 if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) && in rwsem_down_read_slowpath()
1171 * the lock, attempt to spin on owner to accelerate lock in rwsem_down_write_slowpath()
1172 * transfer. If the previous owner is an on-cpu writer and it in rwsem_down_write_slowpath()
1377 * sem->owner may differ from current if the ownership is transferred in __up_write()
1518 struct task_struct *owner) in __rwsem_set_reader_owned() argument
1694 * The owner value for a reader-owned lock is mostly for debugging in down_read_non_owner()