xref: /linux/arch/sparc/include/asm/spinlock_32.h (revision 664b0bae0b87f69bc9deb098f5e0158b9cf18e04)
1*b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2a439fe51SSam Ravnborg /* spinlock.h: 32-bit Sparc spinlock support.
3a439fe51SSam Ravnborg  *
4a439fe51SSam Ravnborg  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5a439fe51SSam Ravnborg  */
6a439fe51SSam Ravnborg 
7a439fe51SSam Ravnborg #ifndef __SPARC_SPINLOCK_H
8a439fe51SSam Ravnborg #define __SPARC_SPINLOCK_H
9a439fe51SSam Ravnborg 
10a439fe51SSam Ravnborg #ifndef __ASSEMBLY__
11a439fe51SSam Ravnborg 
12a439fe51SSam Ravnborg #include <asm/psr.h>
13726328d9SPeter Zijlstra #include <asm/barrier.h>
14f400bdb1SSam Ravnborg #include <asm/processor.h> /* for cpu_relax */
15a439fe51SSam Ravnborg 
/* A spinlock is held while its first byte is non-zero; the volatile
 * cast forces a fresh load from memory on every test.
 */
#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
17a439fe51SSam Ravnborg 
/* Acquire the spinlock, busy-waiting until it is free.
 *
 * ldstub atomically loads the lock byte into %g2 and stores 0xff into
 * it.  A fetched value of zero means the lock was free and is now
 * ours; non-zero means it was held, so we branch to the wait loop.
 * The wait loop lives in .subsection 2 to keep the uncontended path
 * compact: it re-polls with a plain (non-atomic) ldub until the byte
 * reads zero, then jumps back to 1: to retry the atomic ldstub.  The
 * annulled bne,a executes the ldub in its delay slot only when the
 * branch is taken.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub	[%0], %%g2\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2f\n\t"
	" ldub	[%0], %%g2\n\t"
	".subsection	2\n"
	"2:\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2b\n\t"
	" ldub	[%0], %%g2\n\t"
	"b,a	1b\n\t"
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}
37a439fe51SSam Ravnborg 
/* One-shot attempt at the lock.
 *
 * ldstub atomically fetches the lock byte and replaces it with 0xff;
 * a fetched zero means the lock was free and now belongs to us.
 *
 * Returns 1 when the lock was acquired, 0 when it was already held.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int prev;

	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (prev)
			     : "r" (lock)
			     : "memory");

	return prev == 0;
}
47a439fe51SSam Ravnborg 
/* Release the lock by clearing the lock byte.
 *
 * A plain byte store of %g0 (hard-wired zero) suffices here; the
 * "memory" clobber keeps the compiler from sinking protected accesses
 * past the releasing store.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}
52a439fe51SSam Ravnborg 
53a439fe51SSam Ravnborg /* Read-write spinlocks, allowing multiple readers
54a439fe51SSam Ravnborg  * but only one writer.
55a439fe51SSam Ravnborg  *
56a439fe51SSam Ravnborg  * NOTE! it is quite common to have readers in interrupts
57a439fe51SSam Ravnborg  * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
59a439fe51SSam Ravnborg  * irq-safe write-lock, but readers can get non-irqsafe
60a439fe51SSam Ravnborg  * read-locks.
61a439fe51SSam Ravnborg  *
62a439fe51SSam Ravnborg  * XXX This might create some problems with my dual spinlock
63a439fe51SSam Ravnborg  * XXX scheme, deadlocks etc. -DaveM
64a439fe51SSam Ravnborg  *
65a439fe51SSam Ravnborg  * Sort of like atomic_t's on Sparc, but even more clever.
66a439fe51SSam Ravnborg  *
67a439fe51SSam Ravnborg  *	------------------------------------
68fb3a6bbcSThomas Gleixner  *	| 24-bit counter           | wlock |  arch_rwlock_t
69a439fe51SSam Ravnborg  *	------------------------------------
70a439fe51SSam Ravnborg  *	 31                       8 7     0
71a439fe51SSam Ravnborg  *
72a439fe51SSam Ravnborg  * wlock signifies the one writer is in or somebody is updating
73a439fe51SSam Ravnborg  * counter. For a writer, if he successfully acquires the wlock,
74a439fe51SSam Ravnborg  * but counter is non-zero, he has to release the lock and wait,
75a439fe51SSam Ravnborg  * till both counter and wlock are zero.
76a439fe51SSam Ravnborg  *
77a439fe51SSam Ravnborg  * Unfortunately this scheme limits us to ~16,000,000 cpus.
78a439fe51SSam Ravnborg  */
/* Core of the reader lock (the arch_read_lock() wrapper below masks
 * local interrupts around this call).
 *
 * The lock pointer is pinned in %g1 and the return address is copied
 * to %g4 because ___rw_read_enter (an out-of-line assembly helper,
 * not visible in this header) uses a private calling convention
 * rather than the normal ABI.  The ldstub in the call's delay slot
 * grabs the wlock byte at [%g1 + 3] before the helper executes.
 */
static inline void __arch_read_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}
91a439fe51SSam Ravnborg 
/* Reader lock, made irq-safe by masking local interrupts around the
 * core __arch_read_lock() operation (per the note above, readers may
 * take this lock from any context).
 */
#define arch_read_lock(lock)		\
do {					\
	unsigned long _flags;		\
	local_irq_save(_flags);		\
	__arch_read_lock(lock);		\
	local_irq_restore(_flags);	\
} while (0)
98a439fe51SSam Ravnborg 
/* Core of the reader unlock (the arch_read_unlock() wrapper below
 * masks local interrupts around this call).
 *
 * Same private calling convention as __arch_read_lock(): lock in
 * %g1, return address copy in %g4, wlock byte taken by the ldstub in
 * the delay slot, with ___rw_read_exit (assembly, defined elsewhere
 * in the tree) performing the actual counter update.
 */
static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_exit\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}
111a439fe51SSam Ravnborg 
/* Reader unlock, irq-safe counterpart of arch_read_lock(): interrupts
 * are masked while __arch_read_unlock() updates the shared counter.
 */
#define arch_read_unlock(lock)		\
do {					\
	unsigned long _flags;		\
	local_irq_save(_flags);		\
	__arch_read_unlock(lock);	\
	local_irq_restore(_flags);	\
} while (0)
118a439fe51SSam Ravnborg 
/* Acquire the write lock.
 *
 * Same private calling convention as the read-side helpers: lock in
 * %g1, return address copied to %g4, wlock byte taken by the ldstub
 * in the delay slot.  ___rw_write_enter (assembly, defined elsewhere)
 * completes the acquisition — per the scheme documented above, a
 * writer must wait until both the counter and wlock are clear.  Once
 * it returns, the whole word is set to ~0 to advertise the writer;
 * this is the same value arch_write_trylock() stores on success.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_write_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	*(volatile __u32 *)&lp->lock = ~0U;
}
132a439fe51SSam Ravnborg 
/* Release the write lock: a single word store of %g0 clears both the
 * 24-bit counter field and the wlock byte at once.
 */
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__("st %%g0, [%0]" : : "r" (lock) : "memory");
}
1413f6aa0b1SMikael Pettersson 
/* Try to acquire the write lock without blocking.
 *
 * First atomically grab the wlock byte (offset 3 — bits 7:0 of the
 * big-endian word, per the layout diagram above) with ldstub.  If we
 * got it but the 24-bit reader counter is non-zero, release the
 * wlock byte again and fail; otherwise store ~0 into the whole word
 * to record the writer, matching what arch_write_lock() does.
 *
 * Returns 1 on success, 0 if a writer or any reader holds the lock.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val;

	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;	/* any readers in? */
		if (val)
			((volatile u8*)&rw->lock)[3] = 0;	/* back off */
		else
			*(volatile u32*)&rw->lock = ~0U;	/* writer owns it */
	}

	return (val == 0);
}
161a439fe51SSam Ravnborg 
/* Core of the reader trylock (the arch_read_trylock() wrapper below
 * masks local interrupts around this call).
 *
 * Private calling convention: lock pinned in %g1, return address
 * copied to %g4, result received in %o0.  The ldstub in the delay
 * slot grabs the wlock byte; ___rw_read_try (assembly, defined
 * elsewhere — its internals are not visible from this header)
 * completes the attempt and leaves the success/failure value in %o0,
 * which we return unchanged.
 */
static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_try\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}
176a439fe51SSam Ravnborg 
/* Irq-safe reader trylock: this statement expression evaluates to the
 * (int) result of __arch_read_trylock(), with local interrupts masked
 * for the duration of the attempt.
 */
#define arch_read_trylock(lock)			\
({	unsigned long _flags;			\
	int _ret;				\
	local_irq_save(_flags);			\
	_ret = __arch_read_trylock(lock);	\
	local_irq_restore(_flags);		\
	_ret;					\
})
185a439fe51SSam Ravnborg 
186a439fe51SSam Ravnborg #endif /* !(__ASSEMBLY__) */
187a439fe51SSam Ravnborg 
188a439fe51SSam Ravnborg #endif /* __SPARC_SPINLOCK_H */
189