/* arch/arc/include/asm/spinlock.h */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

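/*
 * The lock word @slock holds __ARCH_SPIN_LOCK_UNLOCKED__ or
 * __ARCH_SPIN_LOCK_LOCKED__ (see asm/spinlock_types.h). All lock/unlock
 * paths below are built on the ARC EX instruction, which atomically
 * exchanges a register with a memory location.
 */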
#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous; we only need the one
	 * after the lock for providing the ACQUIRE semantics.
	 * However, doing the "right" thing was regressing hackbench,
	 * so this is kept pending further investigation.
	 */
	smp_mb();

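	/*
	 * Spin: atomically exchange LOCKED into @slock; if the value read
	 * back is also LOCKED, another CPU holds the lock, so retry until
	 * the exchange returns UNLOCKED.
	 */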
	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	"	breq  %0, %2, 1b	\n"
	: "+&r" (tmp)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	/*
	 * ACQUIRE barrier to ensure loads/stores after taking the lock
	 * don't "bleed up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barriers,
	 * thus the full all-all barrier is needed here.
	 */
	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

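	/*
	 * Single atomic exchange attempt: @tmp goes in as LOCKED and comes
	 * back holding the previous value of @slock. The lock is ours iff
	 * that previous value was UNLOCKED.
	 */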
	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (tmp)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the barrier instructions available on
	 * ARCv2, a full barrier is the only option.
	 */
	smp_mb();

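	/*
	 * Atomically exchange UNLOCKED back into @slock, dropping the lock.
	 */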
	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (tmp)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * Superfluous, but kept for now - see the pairing smp_mb() in
	 * arch_spin_lock() above.
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * The rwlock state itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 *
 * Unfair locking: writers can be starved indefinitely by readers.
 */

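/*
 * @counter tracks the reader slots still available: it starts at
 * __ARCH_RW_LOCK_UNLOCKED__, each reader that gets in decrements it,
 * and a writer taking the lock sets it to 0.
 */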
/* Would read_trylock() succeed? */
#define arch_read_can_lock(x)	((x)->counter > 0)

/* Would write_trylock() succeed? */
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;

	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * Zero means a writer holds the lock exclusively: deny the reader.
	 * Otherwise grant the lock to the first/subsequent reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));

	smp_mb();
	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;

	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer. Otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers:
	 * they can be starved for an indefinite time by readers.
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
}

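/*
 * The irq-flags aware lock variants simply ignore @flags: interrupts
 * are not re-enabled while spinning for the lock.
 */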
#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */