xref: /linux/arch/arm/include/asm/spinlock.h (revision ff5599816711d2e67da2d7561fd36ac48debd433)
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"

#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	ALT_SMP(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
#endif
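
/*
 * For illustration only: in the ARM (non-Thumb-2) case, SEV above expands
 * to roughly the following assembler fragment.  The numeric label and the
 * .alt.smp.init entry are what the SMP_ON_UP fixup code (elsewhere in the
 * kernel) uses to patch the instruction into a nop on uniprocessor systems:
 *
 *	9998:	sev
 *		.pushsection ".alt.smp.init", "a"
 *		.long	9998b
 *		nop
 *		.popsection
 */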

static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	/* ARMv6 has no "dsb" mnemonic; use the CP15 c7, c10, 4 barrier encoding */
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}
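
/*
 * Purely as a reader's aid, the ldrex/strex sequence above amounts to the
 * following plain-C sketch.  It assumes the arch_spinlock_t layout from
 * spinlock_types.h (a 32-bit "slock" word overlaid with the tickets.owner
 * and tickets.next halves, with TICKET_SHIFT selecting the "next" half)
 * and models the exclusive-access loop with a GCC atomic builtin; the
 * function name is hypothetical and this is not a drop-in replacement for
 * the assembly:
 */
static inline void __ticket_lock_sketch(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval;

	/*
	 * Atomically take a ticket: the returned (old) word records our
	 * place in the queue, while the add advertises the bumped "next"
	 * value to the following contender.
	 */
	lockval.slock = __atomic_fetch_add(&lock->slock, 1 << TICKET_SHIFT,
					   __ATOMIC_RELAXED);

	/* Wait (with wfe() in the real code) until "owner" reaches our ticket. */
	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();	/* barrier after acquiring, as in arch_spin_lock() */
}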

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
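
/*
 * A note on the trylock sequence above: rotating the 32-bit ticket word by
 * 16 bits swaps its owner and next halves, so the "subs ... ror #16" result
 * is zero exactly when owner == next, i.e. when the lock is free.  The same
 * check as a hypothetical C helper (illustration only, not part of the
 * kernel API):
 */
static inline int __ticket_is_free_sketch(u32 slock)
{
	u32 swapped = (slock >> 16) | (slock << 16);	/* ror #16 */

	return slock == swapped;	/* owner half equals next half */
}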

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return tickets.owner != tickets.next;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
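
/*
 * For orientation, the rwlock word below is used as a signed counter.  A
 * sketch of the encoding, as implied by the code that follows:
 *
 *	0x00000000		unlocked
 *	0x80000000		write-locked (bit 31 set, value is negative)
 *	0x00000001-0x7fffffff	number of active readers
 */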

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
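
/*
 * Again purely as a reader's aid: the ldrex/strex pair in
 * arch_write_trylock() behaves roughly like a single compare-and-swap of
 * 0 -> 0x80000000 on the lock word.  A hypothetical plain-C sketch
 * (illustrative only, assuming rw->lock is the 32-bit word used above):
 */
static inline int __write_trylock_sketch(arch_rwlock_t *rw)
{
	u32 expected = 0;	/* must be completely unlocked */

	if (__atomic_compare_exchange_n(&rw->lock, &expected, 0x80000000U,
					false, __ATOMIC_RELAXED,
					__ATOMIC_RELAXED)) {
		smp_mb();	/* barrier after taking the lock, as above */
		return 1;
	}
	return 0;
}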

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}
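
/*
 * One more reader's-aid sketch: in plain C, the read-lock path above
 * increments the counter as long as no writer holds the lock (i.e. as long
 * as the word is not negative), retrying on contention.  Illustrative only;
 * the function name is hypothetical:
 */
static inline void __read_lock_sketch(arch_rwlock_t *rw)
{
	u32 old;

	for (;;) {
		old = ACCESS_ONCE(rw->lock);
		if ((int)old < 0) {
			/* Writer present; the real code waits in wfe() here. */
			continue;
		}
		/* One more reader, provided nobody raced with us. */
		if (__atomic_compare_exchange_n(&rw->lock, &old, old + 1,
						false, __ATOMIC_RELAXED,
						__ATOMIC_RELAXED))
			break;
	}

	smp_mb();	/* barrier after acquiring, as above */
}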

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	/* tmp2 is 0 only if the strex succeeded with no writer present */
	return tmp2 == 0;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */