// SPDX-License-Identifier: GPL-2.0
/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/alternative.h>
#include <asm/asm.h>

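/*
 * Number of loop iterations a spinning CPU performs before it considers
 * yielding to the lock holder. -1 means "not set"; spin_retry_init()
 * then picks the default of 1000. Can be overridden on the command line
 * with the spin_retry= parameter below.
 */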
int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = 1000;
	return 0;
}
early_initcall(spin_retry_init);

/*
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

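/*
 * Wait queue node for the queued spinlock slow path. Each CPU owns four
 * nodes so that up to four nested lock waits can be queued at the same
 * time (indexed via get_lowcore()->spinlock_index). node_id caches the
 * tail encoding of the node: owning CPU plus one and node index.
 */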
struct spin_wait {
	struct spin_wait *next, *prev;
	int node_id;
} __aligned(32);

static DEFINE_PER_CPU_ALIGNED(struct spin_wait, spin_wait[4]);

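/*
 * Layout of the spinlock value (arch_spinlock_t lock word):
 *	0x0000ffff: CPU number of the lock holder plus one, 0 == unlocked
 *	0x00030000: lock steal counter
 *	0x000c0000: per-CPU node index of the wait queue tail
 *	0xfff00000: CPU number of the wait queue tail plus one, 0 == no waiters
 */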
#define _Q_LOCK_CPU_OFFSET	0
#define _Q_LOCK_STEAL_OFFSET	16
#define _Q_TAIL_IDX_OFFSET	18
#define _Q_TAIL_CPU_OFFSET	20

#define _Q_LOCK_CPU_MASK	0x0000ffff
#define _Q_LOCK_STEAL_ADD	0x00010000
#define _Q_LOCK_STEAL_MASK	0x00030000
#define _Q_TAIL_IDX_MASK	0x000c0000
#define _Q_TAIL_CPU_MASK	0xfff00000

#define _Q_LOCK_MASK		(_Q_LOCK_CPU_MASK | _Q_LOCK_STEAL_MASK)
#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)

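/*
 * Initialize the four wait nodes of a CPU: precompute the tail encoding
 * (CPU number plus one and node index) for each node.
 */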
void arch_spin_lock_setup(int cpu)
{
	struct spin_wait *node;
	int ix;

	node = per_cpu_ptr(&spin_wait[0], cpu);
	for (ix = 0; ix < 4; ix++, node++) {
		memset(node, 0, sizeof(*node));
		node->node_id = ((cpu + 1) << _Q_TAIL_CPU_OFFSET) +
			(ix << _Q_TAIL_IDX_OFFSET);
	}
}

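/*
 * Load the lock word. If the execution-hint facility (facility 49) is
 * installed, the load is preceded by a "niai 4" next-instruction-access-
 * intent hint for the read-only access to the lock cache line.
 */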
static inline int arch_load_niai4(int *lock)
{
	int owner;

	asm_inline volatile(
		ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", ALT_FACILITY(49)) /* NIAI 4 */
		"	l	%[owner],%[lock]\n"
		: [owner] "=d" (owner) : [lock] "R" (*lock) : "memory");
	return owner;
}

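/*
 * Compare-and-swap on the lock word, preceded by a "niai 8" access-intent
 * hint when facility 49 is installed. With flag output operand support
 * the condition code of the CS instruction is used directly; the fallback
 * variant infers success by comparing the returned value with the
 * expected old value.
 */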
#ifdef __HAVE_ASM_FLAG_OUTPUTS__

static inline int arch_try_cmpxchg_niai8(int *lock, int old, int new)
{
	int cc;

	asm_inline volatile(
		ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */
		"	cs	%[old],%[new],%[lock]\n"
		: [old] "+d" (old), [lock] "+Q" (*lock), "=@cc" (cc)
		: [new] "d" (new)
		: "memory");
	return cc == 0;
}

#else /* __HAVE_ASM_FLAG_OUTPUTS__ */

static inline int arch_try_cmpxchg_niai8(int *lock, int old, int new)
{
	int expected = old;

	asm_inline volatile(
		ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */
		"	cs	%[old],%[new],%[lock]\n"
		: [old] "+d" (old), [lock] "+Q" (*lock)
		: [new] "d" (new)
		: "cc", "memory");
	return expected == old;
}

#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */

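/*
 * Translate the tail encoding of a lock word back into a pointer to the
 * per-CPU wait node it refers to.
 */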
static inline struct spin_wait *arch_spin_decode_tail(int lock)
{
	int ix, cpu;

	ix = (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
	cpu = (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET;
	return per_cpu_ptr(&spin_wait[ix], cpu - 1);
}

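/*
 * Select the CPU a waiter should yield to: the current lock holder if the
 * lock is taken, otherwise the owner of the wait node at the head of the
 * queue. Returns the CPU number plus one, or 0 if there is no target.
 */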
static inline int arch_spin_yield_target(int lock, struct spin_wait *node)
{
	if (lock & _Q_LOCK_CPU_MASK)
		return lock & _Q_LOCK_CPU_MASK;
	if (node == NULL || node->prev == NULL)
		return 0;	/* 0 -> no target cpu */
	while (node->prev)
		node = node->prev;
	return node->node_id >> _Q_TAIL_CPU_OFFSET;
}

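/*
 * Queued (fair) spinlock slow path, used on dedicated CPUs: take or steal
 * the lock if it is free, otherwise append this CPU's wait node to the
 * queue, spin on the node's prev pointer until the node is the head of
 * the queue, and finally spin on the lock word itself. Yield to the lock
 * holder whenever its virtual CPU is found to be preempted.
 */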
static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
{
	struct spin_wait *node, *next;
	int lockval, ix, node_id, tail_id, old, new, owner, count;

	ix = get_lowcore()->spinlock_index++;
	barrier();
	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */
	node = this_cpu_ptr(&spin_wait[ix]);
	node->prev = node->next = NULL;
	node_id = node->node_id;

	/* Enqueue the node for this CPU in the spinlock wait queue */
	old = READ_ONCE(lp->lock);
	while (1) {
		if ((old & _Q_LOCK_CPU_MASK) == 0 &&
		    (old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) {
			/*
			 * The lock is free but there may be waiters.
			 * With no waiters simply take the lock, if there
			 * are waiters try to steal the lock. The lock may
			 * be stolen three times before the next queued
			 * waiter will get the lock.
			 */
			new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval;
			if (arch_try_cmpxchg(&lp->lock, &old, new))
				/* Got the lock */
				goto out;
			/* lock passing in progress */
			continue;
		}
		/* Make the node of this CPU the new tail. */
		new = node_id | (old & _Q_LOCK_MASK);
		if (arch_try_cmpxchg(&lp->lock, &old, new))
			break;
	}
	/* Set the 'next' pointer of the tail node in the queue */
	tail_id = old & _Q_TAIL_MASK;
	if (tail_id != 0) {
		node->prev = arch_spin_decode_tail(tail_id);
		WRITE_ONCE(node->prev->next, node);
	}

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_spin_yield_target(old, node);
	if (owner && arch_vcpu_is_preempted(owner - 1))
		smp_yield_cpu(owner - 1);

	/* Spin on the CPU local node->prev pointer */
	if (tail_id != 0) {
		count = spin_retry;
		while (READ_ONCE(node->prev) != NULL) {
			if (count-- >= 0)
				continue;
			count = spin_retry;
			/* Query running state of lock holder again. */
			owner = arch_spin_yield_target(old, node);
			if (owner && arch_vcpu_is_preempted(owner - 1))
				smp_yield_cpu(owner - 1);
		}
	}

	/* Spin on the lock value in the spinlock_t */
	count = spin_retry;
	while (1) {
		old = READ_ONCE(lp->lock);
		owner = old & _Q_LOCK_CPU_MASK;
		if (!owner) {
			tail_id = old & _Q_TAIL_MASK;
			new = ((tail_id != node_id) ? tail_id : 0) | lockval;
			if (arch_try_cmpxchg(&lp->lock, &old, new))
				/* Got the lock */
				break;
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}

	/* Pass lock_spin job to next CPU in the queue */
	if (node_id && tail_id != node_id) {
		/* Wait until the next CPU has set up the 'next' pointer */
		while ((next = READ_ONCE(node->next)) == NULL)
			;
		next->prev = NULL;
	}

 out:
	get_lowcore()->spinlock_index--;
}

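/*
 * Classic (unfair) spinlock slow path, used on shared CPUs: spin directly
 * on the lock word using NIAI access-intent hints. After spin_retry
 * unsuccessful iterations yield to the lock holder, unless the system
 * runs on LPAR and the holder's CPU is not preempted.
 */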
static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
{
	int lockval, old, new, owner, count;

	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
	if (owner && arch_vcpu_is_preempted(owner - 1))
		smp_yield_cpu(owner - 1);

	count = spin_retry;
	while (1) {
		old = arch_load_niai4(&lp->lock);
		owner = old & _Q_LOCK_CPU_MASK;
		/* Try to get the lock if it is free. */
		if (!owner) {
			new = (old & _Q_TAIL_MASK) | lockval;
			if (arch_try_cmpxchg_niai8(&lp->lock, old, new)) {
				/* Got the lock */
				return;
			}
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}
}

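/*
 * Out of line slow path of arch_spin_lock(): dedicated CPUs use the fair
 * queued variant, shared CPUs the classic variant.
 */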
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	if (test_cpu_flag(CIF_DEDICATED_CPU))
		arch_spin_lock_queued(lp);
	else
		arch_spin_lock_classic(lp);
}
EXPORT_SYMBOL(arch_spin_lock_wait);

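/*
 * Retry taking the lock up to spin_retry times whenever it is observed to
 * be free, without queueing or yielding. Returns 1 on success, 0 otherwise.
 */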
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	for (count = spin_retry; count > 0; count--) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (arch_try_cmpxchg(&lp->lock, &owner, cpu))
				return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

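/*
 * Reader slow path: the reader count has already been added by the fast
 * path but the writer bit (0x10000) was set. In interrupt context just
 * spin until the writer is done to keep recursive read locking working;
 * otherwise drop the reader count, take the rw->wait spinlock to queue
 * behind other waiters, re-add the count and wait for the writer to
 * release the lock.
 */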
void arch_read_lock_wait(arch_rwlock_t *rw)
{
	if (unlikely(in_interrupt())) {
		while (READ_ONCE(rw->cnts) & 0x10000)
			barrier();
		return;
	}

	/* Remove this reader again to allow recursive read locking */
	__atomic_add_const(-1, &rw->cnts);
	/* Put the reader into the wait queue */
	arch_spin_lock(&rw->wait);
	/* Now add this reader to the count value again */
	__atomic_add_const(1, &rw->cnts);
	/* Loop until the writer is done */
	while (READ_ONCE(rw->cnts) & 0x10000)
		barrier();
	arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_read_lock_wait);

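/*
 * Writer slow path: announce this CPU as a write waiter (0x20000 per
 * waiter), serialize with other waiters via rw->wait, then wait until
 * neither a writer (0x10000) nor any reader (low 16 bits) holds the lock
 * and set the writer bit.
 */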
void arch_write_lock_wait(arch_rwlock_t *rw)
{
	int old;

	/* Add this CPU to the write waiters */
	__atomic_add(0x20000, &rw->cnts);

	/* Put the writer into the wait queue */
	arch_spin_lock(&rw->wait);

	while (1) {
		old = READ_ONCE(rw->cnts);
		if ((old & 0x1ffff) == 0 &&
		    arch_try_cmpxchg(&rw->cnts, &old, old | 0x10000))
			/* Got the lock */
			break;
		barrier();
	}

	arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_write_lock_wait);

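/*
 * Directed yield to the current lock holder: do nothing if the lock is
 * free, or if the system runs on LPAR and the holder's CPU is not
 * preempted; otherwise give up the time slice in favor of the holder.
 */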
void arch_spin_relax(arch_spinlock_t *lp)
{
	int cpu;

	cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
		return;
	smp_yield_cpu(cpu - 1);
}
EXPORT_SYMBOL(arch_spin_relax);
345