/*
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 */

#ifndef _ASM_RISCV_SPINLOCK_H
#define _ASM_RISCV_SPINLOCK_H

#include <linux/kernel.h>
#include <asm/current.h>
#include <asm/fence.h>

/*
 * Simple spin lock operations.  These provide no fairness guarantees.
 */

/* FIXME: Replace this with a ticket lock, like MIPS. */

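/*
 * The lock word holds 0 while the lock is free and a non-zero value while
 * it is held, giving a plain test-and-set lock.  These arch_* primitives
 * back the generic spin_lock()/spin_unlock() API.
 */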
#define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)

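/*
 * Drop the lock with a store-release, so every access made inside the
 * critical section is ordered before the lock word is seen as free.
 */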
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_store_release(&lock->lock, 0);
}

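/*
 * Atomically swap 1 into the lock word; the previous value lands in "busy".
 * A previous value of 0 means the lock was free and is now ours.  The
 * acquire barrier keeps the critical section from being reordered above
 * the swap.
 */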
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = 1, busy;

	__asm__ __volatile__ (
		"	amoswap.w %0, %2, %1\n"
		RISCV_ACQUIRE_BARRIER
		: "=r" (busy), "+A" (lock->lock)
		: "r" (tmp)
		: "memory");

	return !busy;
}

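/*
 * Test-and-test-and-set: spin on a plain read until the lock looks free,
 * then race for it with a single trylock, so the wait loop reads the line
 * instead of repeatedly issuing AMOs.
 */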
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (arch_spin_is_locked(lock))
			continue;

		if (arch_spin_trylock(lock))
			break;
	}
}

/***********************************************************/

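/*
 * Reader/writer locks share one word: 0 means unlocked, a positive value
 * counts the active readers, and -1 marks an exclusive writer.  The word
 * is updated with lr.w/sc.w retry loops.  arch_read_lock() below spins
 * while a writer holds the word, then bumps the reader count.
 */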
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	int tmp;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bltz	%1, 1b\n"
		"	addi	%1, %1, 1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}

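/*
 * Writers spin until the word reads 0 (no readers, no writer), then try to
 * store -1; a failed sc.w means the reservation was lost, so start over.
 */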
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	int tmp;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bnez	%1, 1b\n"
		"	li	%1, -1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}

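/*
 * One-shot read lock: bail out to the forward label if a writer holds the
 * word (it reads negative), leaving "busy" non-zero so the function returns
 * 0.  Only a failed sc.w causes a retry.
 */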
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int busy;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bltz	%1, 1f\n"
		"	addi	%1, %1, 1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}

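/*
 * One-shot write lock: give up if the word is non-zero (readers or a writer
 * present); otherwise try to claim it by storing -1, retrying only when the
 * sc.w itself fails.
 */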
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	int busy;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bnez	%1, 1f\n"
		"	li	%1, -1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}

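/*
 * Drop a reader: the release barrier orders the critical section before the
 * count is decremented, and amoadd.w with destination x0 adds -1 to the word
 * while discarding the old value.
 */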
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		RISCV_RELEASE_BARRIER
		"	amoadd.w x0, %1, %0\n"
		: "+A" (lock->lock)
		: "r" (-1)
		: "memory");
}

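/* Drop the writer: a store-release of 0 reopens the lock to readers and writers. */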
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	smp_store_release(&lock->lock, 0);
}

#endif /* _ASM_RISCV_SPINLOCK_H */