xref: /linux/include/asm-generic/qspinlock.h (revision 7a309195d11cde854eb75559fbd6b48f9e518f25)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: non-zero if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked with respect to the lockref code, to prevent the lockref
 *      code from stealing the lock and changing things underneath it.
 *      This also allows some optimizations to be applied without
 *      conflicting with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}
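
/*
 * Illustrative sketch only (hypothetical struct and helper): taking @lock by
 * value matters because lockref-style code works on a snapshot of the whole
 * lock-plus-count word and only attempts its wide cmpxchg while the snapshot
 * shows the lock completely free, with no owner and no queued waiters.
 */
struct example_lockref {
	struct qspinlock	lock;
	int			count;
};

static inline bool example_lockref_can_go_fast(const struct example_lockref *lr)
{
	struct example_lockref old = *lr;	/* snapshot, not the live lock */

	/* Locked or contended: fall back to taking the spinlock for real. */
	if (!queued_spin_value_unlocked(old.lock))
		return false;

	/* A cmpxchg over the whole { lock, count } snapshot would go here. */
	return true;
}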

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: non-zero if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}
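
/*
 * Illustrative sketch only: the contention check lets a lock holder give way
 * to queued waiters, the idea behind spin_needbreak()/cond_resched_lock():
 *
 *	if (queued_spin_is_contended(lock)) {
 *		queued_spin_unlock(lock);
 *		cpu_relax();
 *		queued_spin_lock(lock);
 *	}
 */
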
/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	u32 val = atomic_read(&lock->val);

	/* Bail out on a plain read if the lock is held or contended ... */
	if (unlikely(val))
		return 0;

	/* ... and only then attempt the uncontended 0 -> locked transition. */
	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}
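
/*
 * Illustrative sketch only: callers that must not wait behind the queue (for
 * example because they already hold other locks and want to avoid ordering
 * trouble) use the trylock form and back off on failure:
 *
 *	if (!queued_spin_trylock(lock))
 *		return -EAGAIN;
 *	...critical section...
 *	queued_spin_unlock(lock);
 */
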

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	/* Fast path: uncontended 0 -> locked transition with acquire semantics. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	/* Slow path: val now holds the value that made the cmpxchg fail. */
	queued_spin_lock_slowpath(lock, val);
}

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif
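
/*
 * Illustrative sketch only (hypothetical name): an architecture that needs a
 * different unlock, e.g. to hook a paravirt call, supplies its own function
 * named queued_spin_unlock together with
 * "#define queued_spin_unlock queued_spin_unlock" from its asm/qspinlock.h
 * before this header is included, so the generic version above is skipped.
 * In the native case the body typically still reduces to the release store:
 */
static __always_inline void example_arch_queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release(&lock->locked, 0);	/* or a pvops call when paravirtualized */
}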

#ifndef virt_spin_lock
/*
 * Hook for architectures that want to bypass the queueing slow path when
 * running as a guest, where vCPU preemption makes strict queueing perform
 * badly. Returning false keeps the native queued behaviour.
 */
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
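
/*
 * Illustrative sketch only (hypothetical name, simplified): the kind of
 * test-and-set fallback an architecture can plug in via virt_spin_lock()
 * for guests, trading queueing fairness for robustness against vCPU
 * preemption; the real x86 version is additionally gated on a static key.
 */
static __always_inline bool example_virt_spin_lock(struct qspinlock *lock)
{
	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}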

/*
 * Remap the architecture-specific spinlock functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)

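/*
 * Illustrative sketch only (hypothetical arch): an architecture that selects
 * queued spinlocks gets the arch_spin_*() operations above by including this
 * header after its own overrides, typically from (or via) its asm/qspinlock.h:
 *
 *	#include <asm-generic/qspinlock.h>
 */
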
#endif /* __ASM_GENERIC_QSPINLOCK_H */