/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * A 'generic' spinlock implementation that is based on MCS locks. For an
 * architecture that's looking for a 'generic' spinlock, please first consider
 * ticket-lock.h and only come looking here when you've considered all the
 * constraints below and can show your hardware does actually perform better
 * with qspinlock.
 *
 * qspinlock relies on atomic_*_release()/atomic_*_acquire() to be RCsc (or no
 * weaker than RCtso if you're Power), where regular code only expects atomic_t
 * to be RCpc.
 *
 * qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set
 * of atomic operations to behave well together; please audit them carefully to
 * ensure they all make forward progress. Many atomic operations may default to
 * cmpxchg() loops, which do not have good forward progress properties on
 * LL/SC architectures.
 *
 * One notable example is atomic_fetch_or_acquire(), which x86 cannot (cheaply)
 * do. Carefully read the patches that introduced
 * queued_fetch_set_pending_acquire().
 *
 * qspinlock also relies heavily on mixed-size atomic operations; specifically,
 * it requires architectures to have xchg16, something which many LL/SC
 * architectures need to implement as a 32-bit and+or in order to satisfy the
 * forward progress guarantees mentioned above.
 *
 * Further reading on mixed-size atomics that might be relevant:
 *
 *   http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
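/*
 * Illustrative only (editorial, not part of this header): one way an
 * LL/SC architecture without a native xchg16 might emulate a 16-bit
 * exchange with a word-sized cmpxchg loop.  The helper name is
 * hypothetical, and the loop by itself does NOT provide the forward
 * progress guarantee discussed above; it merely shows the mixed-size
 * access pattern involved.
 *
 *	static inline u16 example_xchg16(u32 *word, int shift, u16 new)
 *	{
 *		u32 old = READ_ONCE(*word);
 *		u32 mask = 0xffffU << shift, tmp;
 *
 *		do {
 *			tmp = (old & ~mask) | ((u32)new << shift);
 *		} while (!try_cmpxchg_relaxed(word, &old, tmp));
 *
 *		return (u16)(old >> shift);
 *	}
 */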
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>

#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code, to prevent the lockref code from
 *      stealing the lock and changing things underneath it. This also
 *      allows some optimizations to be applied without conflict with
 *      lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !lock.val.counter;
}

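/*
 * Illustrative use (editorial; loosely modelled on how lockref-style
 * callers reach this through the arch_spin_value_unlocked() wrapper
 * further down): the caller works on a snapshot of the lock word and
 * only attempts its cmpxchg fast path when that snapshot shows neither
 * an owner nor any queued waiters:
 *
 *	struct qspinlock snap;
 *
 *	atomic_set(&snap.val, atomic_read(&lock->val));
 *	if (queued_spin_value_unlocked(snap))
 *		... attempt the lock-free update ...
 */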
/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	int val = atomic_read(&lock->val);

	/* Peek at the lock word first so a busy lock fails without an atomic RMW. */
	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}
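/*
 * Note (editorial, not from the original source): the trylock only
 * succeeds when the entire lock word is zero, i.e. no owner, no pending
 * bit and no queued waiters; a clear locked byte alone is not enough.
 * A typical caller pairs it with the unlock below:
 *
 *	if (queued_spin_trylock(lock)) {
 *		... critical section ...
 *		queued_spin_unlock(lock);
 *	}
 */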

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	int val = 0;

	/* Fast path: grab an uncontended lock by flipping 0 -> _Q_LOCKED_VAL. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	/*
	 * On failure, try_cmpxchg has updated @val with the lock word it
	 * observed; hand that observed value to the slowpath.
	 */
	queued_spin_lock_slowpath(lock, val);
}
#endif

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif
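/*
 * Editorial sketch (hypothetical and simplified, not from this file):
 * the #ifndef above lets an architecture substitute its own release,
 * for example a paravirtualized build that must kick a waiter sleeping
 * in the hypervisor.  Roughly:
 *
 *	#define queued_spin_unlock queued_spin_unlock
 *	static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 *	{
 *		smp_store_release(&lock->locked, 0);
 *		pv_kick_waiter_if_needed(lock);		// hypothetical helper
 *	}
 */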

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
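/*
 * Editorial sketch (hypothetical, not from this file): an architecture
 * that can detect it is running under a hypervisor may override
 * virt_spin_lock() to degrade to a simple test-and-set style lock, so
 * that a preempted queue head cannot stall every other waiter.  Roughly:
 *
 *	static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 *	{
 *		if (!running_on_hypervisor())	// hypothetical predicate
 *			return false;
 *
 *		do {
 *			while (atomic_read(&lock->val))
 *				cpu_relax();
 *		} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL));
 *
 *		return true;
 *	}
 */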

/*
 * Remap the architecture-specific spinlock operations to the
 * corresponding queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
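/*
 * Usage sketch (illustrative; exact file names vary by architecture): an
 * architecture opts in to qspinlock by routing its spinlock headers here,
 * typically along the lines of:
 *
 *	// arch/foo/include/asm/spinlock_types.h
 *	#include <asm-generic/qspinlock_types.h>
 *
 *	// arch/foo/include/asm/spinlock.h
 *	#include <asm/qspinlock.h>
 *	#include <asm-generic/qspinlock.h>
 *
 * where asm/qspinlock.h supplies any overrides (queued_spin_unlock,
 * virt_spin_lock, ...) before this generic header is included.
 */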

#endif /* __ASM_GENERIC_QSPINLOCK_H */