xref: /linux/arch/arc/include/asm/spinlock.h (revision a518d63777a4e94e4b2dd86501604ec49ffe86b2)
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->slock, !VAL);
}

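/*
 * smp_cond_load_acquire() spins until the lock word satisfies !VAL
 * (i.e. reads back as __ARCH_SPIN_LOCK_UNLOCKED__) and then provides
 * ACQUIRE ordering. A rough open-coded sketch of the same idea (not the
 * generated code):
 *
 *	while (READ_ONCE(lock->slock) != __ARCH_SPIN_LOCK_UNLOCKED__)
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();
 */
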
#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();
}

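/*
 * C-level sketch of the LLOCK/SCOND loop above (illustration only;
 * store_conditional() is a stand-in for the SCOND instruction, not a
 * real kernel helper - the actual atomic sequence is the inline asm):
 *
 *	for (;;) {
 *		val = lock->slock;			// llock
 *		if (val == __ARCH_SPIN_LOCK_LOCKED__)
 *			continue;			// breq: spin while LOCKED
 *		if (store_conditional(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__))
 *			break;				// scond succeeded: lock taken
 *		// scond failed (bnz 1b): another core intervened, retry
 *	}
 */
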
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	"					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

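/*
 * Trylock differs from the lock loop only in the LOCKED case: instead of
 * spinning it branches to label 4 and returns 0. In rough C (with
 * store_conditional() again standing in for SCOND):
 *
 *	for (;;) {
 *		if (lock->slock == __ARCH_SPIN_LOCK_LOCKED__)
 *			return 0;			// already held, bail out
 *		if (store_conditional(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__))
 *			return 1;			// lock taken
 *		// scond collided with another core: retry (bnz 1b)
 *	}
 */
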
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking: writers can be starved indefinitely by reader(s).
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * A count of zero means a writer holds the lock exclusively; deny
	 * the reader. Otherwise grant the lock to the first/subsequent reader:
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

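/*
 * Note the pseudo-code in the comment above describes a single attempt;
 * the arch_read_lock() loop keeps retrying until it succeeds. Rough
 * equivalent, with store_conditional() standing in for SCOND:
 *
 *	for (;;) {
 *		val = rw->counter;			// llock
 *		if (val <= 0)
 *			continue;			// write locked: spin
 *		if (store_conditional(&rw->counter, val - 1))
 *			break;				// got a reader slot
 *	}
 */
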
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers: they can
	 * be starved for an indefinite time by readers.
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

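/*
 * The write-lock loop above in rough C terms (store_conditional() again
 * standing in for SCOND; 0 is the "write locked" counter value):
 *
 *	for (;;) {
 *		val = rw->counter;			// llock
 *		if (val != __ARCH_RW_LOCK_UNLOCKED__)
 *			continue;			// held by readers/writer: spin
 *		if (store_conditional(&rw->counter, 0))
 *			break;				// exclusively ours
 *	}
 */
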
/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");

	smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

	smp_mb();
}

#else	/* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous; we only need the one
	 * after the lock to provide the ACQUIRE semantics.
	 * However, doing the "right" thing was regressing hackbench,
	 * so this is kept pending further investigation.
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
#ifdef CONFIG_EZNPS_MTM_EXT
	"	.word %3		\n"
#endif
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
#ifdef CONFIG_EZNPS_MTM_EXT
	, "i"(CTOP_INST_SCHD_RW)
#endif
	: "memory");

	/*
	 * ACQUIRE barrier to ensure loads/stores after taking the lock
	 * don't "bleed up" out of the critical section (leak-in is allowed).
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barriers,
	 * thus the full all-all barrier is needed here.
	 */
	smp_mb();
}

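/*
 * Without LLOCK/SCOND the lock is taken with the atomic EX (exchange)
 * instruction: swap LOCKED into the lock word and spin until the old
 * value read back was UNLOCKED. Rough C-level sketch (illustration only;
 * atomic_exchange() is a stand-in for the EX instruction):
 *
 *	do {
 *		val = atomic_exchange(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__);
 */
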
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

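/*
 * Trylock is a single exchange: LOCKED is unconditionally swapped in and
 * the call succeeds iff the previous value was UNLOCKED. In rough C
 * (atomic_exchange() again standing in for EX):
 *
 *	val = atomic_exchange(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
 *	return val == __ARCH_SPIN_LOCK_UNLOCKED__;
 */
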
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions available on ARCv2, a full
	 * barrier is the only option
	 */
	smp_mb();

	/*
	 * EX is not really required here; a simple store of 0 would suffice.
	 * However that causes tasklist livelocks in SystemC-based SMP virtual
	 * platforms, where the SystemC core scheduler uses EX as a cue for
	 * moving to the next core. See the git log of this file for details.
	 */
	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * superfluous, but kept for now - see the pairing version in
	 * arch_spin_lock above
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking: writers can be starved indefinitely by reader(s).
 *
 * The rwlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

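/*
 * For reference, this scheme assumes the !LLSC arch_rwlock_t carries both
 * fields, presumably defined in asm/spinlock_types.h roughly as:
 *
 *	typedef struct {
 *		volatile unsigned int	counter;
 *		arch_spinlock_t		lock_mutex;
 *	} arch_rwlock_t;
 *
 * (sketch only - see spinlock_types.h for the authoritative definition)
 */
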
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * A count of zero means a writer holds the lock exclusively; deny
	 * the reader. Otherwise grant the lock to the first/subsequent reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	smp_mb();
	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers: they can
	 * be starved for an indefinite time by readers.
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif

#define arch_read_can_lock(x)	((x)->counter > 0)
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */