/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

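/*
 * Number of loop iterations a contended lock path spins before giving
 * up the CPU. The default of -1 is resolved at boot below: with the
 * compare-and-delay facility each iteration already delays, so far
 * fewer iterations are needed (10 instead of 1000). Can be overridden
 * with the "spin_retry=" kernel parameter.
 */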
int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
	return 0;
}
early_initcall(spin_retry_init);

/*
 * Handle the "spin_retry=" kernel command line parameter.
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

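/*
 * Compare-and-delay: delay the CPU while the lock word still contains
 * the expected old value, cutting down memory traffic while spinning.
 * The instruction is emitted via .insn, presumably so the file still
 * assembles with binutils versions that predate the mnemonic.
 */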
static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
{
	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}

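/*
 * Slow path of arch_spin_lock(). The lock word holds the lockval of
 * the owning CPU, the bitwise complement of its CPU number, so ~owner
 * recovers the number to pass to smp_yield_cpu() and
 * arch_vcpu_is_preempted(). first_diag makes the owner be probed (and
 * yielded to) right away on the first iteration only; later yields
 * happen only after a full spin_retry loop has failed.
 */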
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count, first_diag;

	first_diag = 1;
	while (1) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* First iteration: check if the lock owner is running. */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = READ_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

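/*
 * Variant of arch_spin_lock_wait() for spin_lock_irqsave(): the
 * caller's saved interrupt state is restored while busy waiting and
 * interrupts are disabled again before the lock is actually acquired.
 */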
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count, first_diag;

	local_irq_restore(flags);
	first_diag = 1;
	while (1) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		/* First iteration: check if the lock owner is running. */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = READ_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

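/*
 * Bounded trylock: retry the compare-and-swap up to spin_retry times,
 * using compare-and-delay between attempts while another CPU holds the
 * lock.
 */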
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	for (count = spin_retry; count > 0; count--) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return 1;
		} else if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&lp->lock, owner);
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

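/*
 * rwlock slow paths. The lower 31 bits of the lock word count the
 * readers; 0x80000000 is the writer bit, so the word is negative as a
 * signed int while a writer holds or requests the lock. On z196 and
 * newer the reader fast path unconditionally increments the count with
 * an interlocked add, so _raw_read_lock_wait() first has to back that
 * increment out again.
 */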
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old;
	int count = spin_retry;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/* Revert the reader count increment done by the fast path. */
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = READ_ONCE(rw->lock);
		owner = READ_ONCE(rw->owner);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = READ_ONCE(rw->lock);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

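/*
 * Writer slow path in two variants: with the z196 interlocked-access
 * facility the write bit is set with an atomic OR, otherwise with a
 * compare-and-swap loop. Either way the writer may only proceed once
 * all readers have drained (low 31 bits zero) and no other writer
 * owned the bit before it (prev non-negative).
 */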
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
	unsigned int owner, old;
	int count = spin_retry;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = READ_ONCE(rw->lock);
		owner = READ_ONCE(rw->owner);
		smp_mb();
		if ((int) old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old, prev;
	int count = spin_retry;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = READ_ONCE(rw->lock);
		owner = READ_ONCE(rw->owner);
		if ((int) old >= 0 &&
		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

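/*
 * Bounded writer trylock: the compare-and-swap is only attempted while
 * the lock word is completely free, for at most spin_retry iterations.
 */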
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = READ_ONCE(rw->lock);
		if (old) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

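/*
 * Yield to the CPU that currently holds a lock. @cpu is the lockval of
 * the owner, i.e. the complement of its CPU number; 0 means there is
 * no known owner. On LPAR the yield is skipped if the owner is
 * currently running.
 */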
void arch_lock_relax(unsigned int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);