/*
 * arch/sh/include/asm/spinlock.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_H
#define __ASM_SH_SPINLOCK_H

/*
 * The only locking implemented here uses SH-4A opcodes. For others,
 * split this out as per atomic-*.h.
 */
#ifndef CONFIG_CPU_SH4A
#error "Need movli.l/movco.l for spinlocks"
#endif
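
/*
 * movli.l/movco.l are SH-4A's load-linked/store-conditional pair:
 * movli.l loads a word and opens a reservation, movco.l stores only
 * while that reservation is still intact and leaves success/failure in
 * the T bit, so "bf 1b" (branch if T is false) retries the sequence.
 * As an illustrative sketch only (ll()/sc() are hypothetical helpers,
 * not real kernel APIs), every locked update below follows the pattern:
 *
 *	do {
 *		old = ll(addr);		// movli.l @addr -> old
 *		new = transform(old);	// plain ALU ops
 *	} while (!sc(addr, new));	// movco.l new -> @addr, retry on fail
 */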

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define __raw_spin_is_locked(x)		((x)->lock <= 0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
	do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0)
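
/*
 * Convention used below: lock->lock is 1 while the lock is free and 0
 * once a CPU has taken it, which is why __raw_spin_is_locked() tests
 * for a non-positive value, __raw_spin_lock() swaps in 0, and
 * __raw_spin_unlock() stores 1.
 */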

/*
 * Simple spin lock operations.  There are two variants: one disables
 * IRQs on the local processor, the other does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! __raw_spin_lock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"cmp/pl		%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}
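
/*
 * Roughly what the asm in __raw_spin_lock() does, as a C sketch only
 * (load_linked()/store_conditional() are illustrative stand-ins for
 * movli.l/movco.l, not real functions):
 *
 *	do {
 *		oldval = load_linked(&lock->lock);
 *	} while (!store_conditional(&lock->lock, 0) || oldval <= 0);
 *
 * i.e. atomically swap in 0 and keep spinning until the value we
 * displaced was positive (the lock was free when we grabbed it).
 */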

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"mov		#1, %0 ! __raw_spin_unlock	\n\t"
		"mov.l		%0, @%1				\n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! __raw_spin_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	return oldval;
}
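
/*
 * The return value is the previous contents of lock->lock: non-zero
 * means the lock was free and is now ours, 0 means somebody else holds
 * it.  The trailing "synco" is SH-4A's memory barrier instruction; it
 * is presumably there so accesses in the critical section cannot be
 * observed before the lock word update completes (acquire semantics).
 */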

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks: any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(x)	((x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
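
/*
 * The rwlock is a single counter that starts at RW_LOCK_BIAS when the
 * lock is idle.  Each reader takes one unit, a writer takes the whole
 * bias, so (illustratively, assuming RW_LOCK_BIAS were 0x01000000):
 *
 *	idle			0x01000000
 *	two readers		0x00fffffe
 *	writer held		0x00000000
 *
 * Hence "lock > 0" means another reader still fits, and
 * "lock == RW_LOCK_BIAS" means no readers and no writer are present.
 */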

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! __raw_read_lock	\n\t"
		"cmp/pl		%0				\n\t"
		"bf		1b				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}
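
/*
 * cmp/pl sets T only while the counter is still positive, so a reader
 * spins as long as a writer holds the lock (counter at 0) and then
 * claims its slot by decrementing the counter by one.
 */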

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! __raw_read_unlock	\n\t"
		"add		#1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! __raw_write_lock	\n\t"
		"cmp/hs		%2, %0				\n\t"
		"bf		1b				\n\t"
		"sub		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}
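
/*
 * cmp/hs is an unsigned "higher or same" compare, so the writer spins
 * until the counter has climbed back to the full RW_LOCK_BIAS (no
 * readers, no writer) and then claims the lock by subtracting the
 * entire bias in one go.
 */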

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__ (
		"mov.l		%1, @%0 ! __raw_write_unlock	\n\t"
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! __raw_read_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/pl		%0				\n\t"
		"bf		2f				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"2:						\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	return (oldval > 0);
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! __raw_write_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/hs		%3, %0				\n\t"
		"bf		2f				\n\t"
		"sub		%3, %0				\n\t"
		"2:						\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	return (oldval > (RW_LOCK_BIAS - 1));
}
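
/*
 * Note the shared "2:" path: when the counter is below RW_LOCK_BIAS the
 * compare branches to label 2 without subtracting, so the movco.l just
 * writes the unchanged value back (ending the ll/sc sequence) and the
 * "oldval > RW_LOCK_BIAS - 1" test reports failure.
 */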

#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SH_SPINLOCK_H */