/*-
 * Copyright (c) 2008 Marcel Moolenaar
 * Copyright (c) 2001 Benno Rice
 * Copyright (c) 2001 David E. O'Brien
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#define	__ATOMIC_BARRIER					\
    __asm __volatile("sync" : : : "memory")

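/*
 * Note: the "sync" instruction behind __ATOMIC_BARRIER is a full memory
 * barrier.  The _acq variants below issue it after the primitive operation
 * and the _rel variants issue it before, which yields acquire and release
 * semantics respectively (a stronger barrier than the minimum required).
 */
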
/*
 * atomic_add(p, v)
 * { *p += v; }
 */

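/*
 * The 8-bit, 16-bit and 64-bit variants below are deliberately defined to
 * plain (non-C) text, so any attempt to use them fails at compile time
 * rather than silently doing the wrong thing.
 */
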
#define __ATOMIC_ADD_8(p, v, t)					\
    8-bit atomic_add not implemented

#define __ATOMIC_ADD_16(p, v, t)				\
    16-bit atomic_add not implemented

#define __ATOMIC_ADD_32(p, v, t)				\
    __asm __volatile(						\
	"1:	lwarx	%0, 0, %2\n"				\
	"	add	%0, %3, %0\n"				\
	"	stwcx.	%0, 0, %2\n"				\
	"	bne-	1b\n"					\
	: "=&r" (t), "=m" (*p)					\
	: "r" (p), "r" (v), "m" (*p)				\
	: "cc", "memory")					\
    /* __ATOMIC_ADD_32 */
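
/*
 * This and the other __ATOMIC_*_32 loops below follow the usual PowerPC
 * load-reserve/store-conditional pattern: lwarx loads the word at %2 (the
 * address in p) and establishes a reservation, the new value is computed
 * in %0 (the temporary t), and stwcx. stores it back only if the
 * reservation is still intact.  If another CPU modified the word in the
 * meantime, the conditional store fails, CR0 reflects that, and bne-
 * retries from the lwarx.
 */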

#define	__ATOMIC_ADD_64(p, v, t)				\
    64-bit atomic_add not implemented

#define	_ATOMIC_ADD(width, suffix, type)			\
    static __inline void					\
    atomic_add_##suffix(volatile type *p, type v) {		\
	type t;							\
	__ATOMIC_ADD_##width(p, v, t);				\
    }								\
								\
    static __inline void					\
    atomic_add_acq_##suffix(volatile type *p, type v) {		\
	type t;							\
	__ATOMIC_ADD_##width(p, v, t);				\
	__ATOMIC_BARRIER;					\
    }								\
								\
    static __inline void					\
    atomic_add_rel_##suffix(volatile type *p, type v) {		\
	type t;							\
	__ATOMIC_BARRIER;					\
	__ATOMIC_ADD_##width(p, v, t);				\
    }								\
    /* _ATOMIC_ADD */

#if 0
_ATOMIC_ADD(8, 8, uint8_t)
_ATOMIC_ADD(8, char, u_char)
_ATOMIC_ADD(16, 16, uint16_t)
_ATOMIC_ADD(16, short, u_short)
#endif
_ATOMIC_ADD(32, 32, uint32_t)
_ATOMIC_ADD(32, int, u_int)
_ATOMIC_ADD(32, long, u_long)
_ATOMIC_ADD(32, ptr, uintptr_t)
#if 0
_ATOMIC_ADD(64, 64, uint64_t)
_ATOMIC_ADD(64, long_long, u_long_long)
#endif

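/*
 * The instantiations above provide atomic_add_32(), atomic_add_int(),
 * atomic_add_long() and atomic_add_ptr(), each with _acq_ and _rel_
 * variants, e.g. atomic_add_int(&cnt, 1) atomically increments a
 * (hypothetical) u_int counter "cnt".
 */
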
#undef _ATOMIC_ADD
#undef __ATOMIC_ADD_64
#undef __ATOMIC_ADD_32
#undef __ATOMIC_ADD_16
#undef __ATOMIC_ADD_8

/*
 * atomic_clear(p, v)
 * { *p &= ~v; }
 */

#define __ATOMIC_CLEAR_8(p, v, t)				\
    8-bit atomic_clear not implemented

#define __ATOMIC_CLEAR_16(p, v, t)				\
    16-bit atomic_clear not implemented

#define __ATOMIC_CLEAR_32(p, v, t)				\
    __asm __volatile(						\
	"1:	lwarx	%0, 0, %2\n"				\
	"	andc	%0, %0, %3\n"				\
	"	stwcx.	%0, 0, %2\n"				\
	"	bne-	1b\n"					\
	: "=&r" (t), "=m" (*p)					\
	: "r" (p), "r" (v), "m" (*p)				\
	: "cc", "memory")					\
    /* __ATOMIC_CLEAR_32 */

#define	__ATOMIC_CLEAR_64(p, v, t)				\
    64-bit atomic_clear not implemented

#define	_ATOMIC_CLEAR(width, suffix, type)			\
    static __inline void					\
    atomic_clear_##suffix(volatile type *p, type v) {		\
	type t;							\
	__ATOMIC_CLEAR_##width(p, v, t);			\
    }								\
								\
    static __inline void					\
    atomic_clear_acq_##suffix(volatile type *p, type v) {	\
	type t;							\
	__ATOMIC_CLEAR_##width(p, v, t);			\
	__ATOMIC_BARRIER;					\
    }								\
								\
    static __inline void					\
    atomic_clear_rel_##suffix(volatile type *p, type v) {	\
	type t;							\
	__ATOMIC_BARRIER;					\
	__ATOMIC_CLEAR_##width(p, v, t);			\
    }								\
    /* _ATOMIC_CLEAR */

#if 0
_ATOMIC_CLEAR(8, 8, uint8_t)
_ATOMIC_CLEAR(8, char, u_char)
_ATOMIC_CLEAR(16, 16, uint16_t)
_ATOMIC_CLEAR(16, short, u_short)
#endif
_ATOMIC_CLEAR(32, 32, uint32_t)
_ATOMIC_CLEAR(32, int, u_int)
_ATOMIC_CLEAR(32, long, u_long)
_ATOMIC_CLEAR(32, ptr, uintptr_t)
#if 0
_ATOMIC_CLEAR(64, 64, uint64_t)
_ATOMIC_CLEAR(64, long_long, u_long_long)
#endif

#undef _ATOMIC_CLEAR
#undef __ATOMIC_CLEAR_64
#undef __ATOMIC_CLEAR_32
#undef __ATOMIC_CLEAR_16
#undef __ATOMIC_CLEAR_8

/*
 * atomic_cmpset(p, o, n)
 */
/* TODO -- see below */

/*
 * atomic_load_acq(p)
 */
/* TODO -- see below */

/*
 * atomic_readandclear(p)
 */
/* TODO -- see below */

/*
 * atomic_set(p, v)
 * { *p |= v; }
 */

#define __ATOMIC_SET_8(p, v, t)					\
    8-bit atomic_set not implemented

#define __ATOMIC_SET_16(p, v, t)				\
    16-bit atomic_set not implemented

#define __ATOMIC_SET_32(p, v, t)				\
    __asm __volatile(						\
	"1:	lwarx	%0, 0, %2\n"				\
	"	or	%0, %3, %0\n"				\
	"	stwcx.	%0, 0, %2\n"				\
	"	bne-	1b\n"					\
	: "=&r" (t), "=m" (*p)					\
	: "r" (p), "r" (v), "m" (*p)				\
	: "cc", "memory")					\
    /* __ATOMIC_SET_32 */

#define	__ATOMIC_SET_64(p, v, t)				\
    64-bit atomic_set not implemented

#define	_ATOMIC_SET(width, suffix, type)			\
    static __inline void					\
    atomic_set_##suffix(volatile type *p, type v) {		\
	type t;							\
	__ATOMIC_SET_##width(p, v, t);				\
    }								\
								\
    static __inline void					\
    atomic_set_acq_##suffix(volatile type *p, type v) {		\
	type t;							\
	__ATOMIC_SET_##width(p, v, t);				\
	__ATOMIC_BARRIER;					\
    }								\
								\
    static __inline void					\
    atomic_set_rel_##suffix(volatile type *p, type v) {		\
	type t;							\
	__ATOMIC_BARRIER;					\
	__ATOMIC_SET_##width(p, v, t);				\
    }								\
    /* _ATOMIC_SET */

#if 0
_ATOMIC_SET(8, 8, uint8_t)
_ATOMIC_SET(8, char, u_char)
_ATOMIC_SET(16, 16, uint16_t)
_ATOMIC_SET(16, short, u_short)
#endif
_ATOMIC_SET(32, 32, uint32_t)
_ATOMIC_SET(32, int, u_int)
_ATOMIC_SET(32, long, u_long)
_ATOMIC_SET(32, ptr, uintptr_t)
#if 0
_ATOMIC_SET(64, 64, uint64_t)
_ATOMIC_SET(64, long_long, u_long_long)
#endif

#undef _ATOMIC_SET
#undef __ATOMIC_SET_64
#undef __ATOMIC_SET_32
#undef __ATOMIC_SET_16
#undef __ATOMIC_SET_8

/*
 * atomic_subtract(p, v)
 * { *p -= v; }
 */

#define __ATOMIC_SUBTRACT_8(p, v, t)				\
    8-bit atomic_subtract not implemented

#define __ATOMIC_SUBTRACT_16(p, v, t)				\
    16-bit atomic_subtract not implemented

#define __ATOMIC_SUBTRACT_32(p, v, t)				\
    __asm __volatile(						\
	"1:	lwarx	%0, 0, %2\n"				\
	"	subf	%0, %3, %0\n"				\
	"	stwcx.	%0, 0, %2\n"				\
	"	bne-	1b\n"					\
	: "=&r" (t), "=m" (*p)					\
	: "r" (p), "r" (v), "m" (*p)				\
	: "cc", "memory")					\
    /* __ATOMIC_SUBTRACT_32 */

#define	__ATOMIC_SUBTRACT_64(p, v, t)				\
    64-bit atomic_subtract not implemented

#define	_ATOMIC_SUBTRACT(width, suffix, type)			\
    static __inline void					\
    atomic_subtract_##suffix(volatile type *p, type v) {	\
	type t;							\
	__ATOMIC_SUBTRACT_##width(p, v, t);			\
    }								\
								\
    static __inline void					\
    atomic_subtract_acq_##suffix(volatile type *p, type v) {	\
	type t;							\
	__ATOMIC_SUBTRACT_##width(p, v, t);			\
	__ATOMIC_BARRIER;					\
    }								\
								\
    static __inline void					\
    atomic_subtract_rel_##suffix(volatile type *p, type v) {	\
	type t;							\
	__ATOMIC_BARRIER;					\
	__ATOMIC_SUBTRACT_##width(p, v, t);			\
    }								\
    /* _ATOMIC_SUBTRACT */

#if 0
_ATOMIC_SUBTRACT(8, 8, uint8_t)
_ATOMIC_SUBTRACT(8, char, u_char)
_ATOMIC_SUBTRACT(16, 16, uint16_t)
_ATOMIC_SUBTRACT(16, short, u_short)
#endif
_ATOMIC_SUBTRACT(32, 32, uint32_t)
_ATOMIC_SUBTRACT(32, int, u_int)
_ATOMIC_SUBTRACT(32, long, u_long)
_ATOMIC_SUBTRACT(32, ptr, uintptr_t)
#if 0
_ATOMIC_SUBTRACT(64, 64, uint64_t)
_ATOMIC_SUBTRACT(64, long_long, u_long_long)
#endif

#undef _ATOMIC_SUBTRACT
#undef __ATOMIC_SUBTRACT_64
#undef __ATOMIC_SUBTRACT_32
#undef __ATOMIC_SUBTRACT_16
#undef __ATOMIC_SUBTRACT_8

/*
 * atomic_store_rel(p, v)
 */
/* TODO -- see below */

/*
 * Old/original implementations that still need revisiting.
 */

static __inline uint32_t
atomic_readandclear_32(volatile uint32_t *addr)
{
	uint32_t result, temp;

#ifdef __GNUCLIKE_ASM
	__asm __volatile (
		"\tsync\n"			/* drain writes */
		"1:\tlwarx %0, 0, %3\n\t"	/* load old value */
		"li %1, 0\n\t"			/* load new value */
		"stwcx. %1, 0, %3\n\t"		/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		: "=&r"(result), "=&r"(temp), "=m" (*addr)
		: "r" (addr), "m" (*addr)
		: "cc", "memory");
#endif

	return (result);
}
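
/*
 * atomic_readandclear_32() atomically replaces *addr with zero and returns
 * the previous contents; the leading sync orders all earlier stores before
 * the swap.
 */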

#define	atomic_readandclear_int		atomic_readandclear_32
#define	atomic_readandclear_long	atomic_readandclear_32
#define	atomic_readandclear_ptr		atomic_readandclear_32

/*
 * We assume that a = b will do atomic loads and stores.
 */
#define	ATOMIC_STORE_LOAD(TYPE, WIDTH)				\
static __inline u_##TYPE					\
atomic_load_acq_##WIDTH(volatile u_##TYPE *p)			\
{								\
	u_##TYPE v;						\
								\
	v = *p;							\
	__ATOMIC_BARRIER;					\
	return (v);						\
}								\
								\
static __inline void						\
atomic_store_rel_##WIDTH(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
	__ATOMIC_BARRIER;					\
	*p = v;							\
}								\
								\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE v;						\
								\
	v = *p;							\
	__ATOMIC_BARRIER;					\
	return (v);						\
}								\
								\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
	__ATOMIC_BARRIER;					\
	*p = v;							\
}

ATOMIC_STORE_LOAD(char,		8)
ATOMIC_STORE_LOAD(short,	16)
ATOMIC_STORE_LOAD(int,		32)

#define	atomic_load_acq_long	atomic_load_acq_32
#define	atomic_store_rel_long	atomic_store_rel_32
#define	atomic_load_acq_ptr	atomic_load_acq_32
#define	atomic_store_rel_ptr	atomic_store_rel_32

#undef ATOMIC_STORE_LOAD
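
/*
 * Illustrative (not compiled) producer/consumer sketch of the acquire/
 * release pairs generated above; "data" and "ready" are hypothetical
 * shared variables:
 */
#if 0
static u_int data, ready;

static void
producer(void)
{
	data = 42;				/* plain store */
	atomic_store_rel_int(&ready, 1);	/* barrier, then store */
}

static void
consumer(void)
{
	while (atomic_load_acq_int(&ready) == 0)	/* load, then barrier */
		;
	/* "data" is now guaranteed to read as 42. */
}
#endif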

/*
 * Atomically compare the value stored at *p with cmpval and, if the two
 * values are equal, update the value of *p with newval.  Returns zero if
 * the compare failed, nonzero otherwise.
 */
static __inline uint32_t
atomic_cmpset_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	uint32_t	ret;

#ifdef __GNUCLIKE_ASM
	__asm __volatile (
		"1:\tlwarx %0, 0, %2\n\t"	/* load old value */
		"cmplw %3, %0\n\t"		/* compare */
		"bne 2f\n\t"			/* exit if not equal */
		"stwcx. %4, 0, %2\n\t"		/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		"li %0, 1\n\t"			/* success - retval = 1 */
		"b 3f\n\t"			/* we've succeeded */
		"2:\n\t"
		"stwcx. %0, 0, %2\n\t"		/* clear reservation (74xx) */
		"li %0, 0\n\t"			/* failure - retval = 0 */
		"3:\n\t"
		: "=&r" (ret), "=m" (*p)
		: "r" (p), "r" (cmpval), "r" (newval), "m" (*p)
		: "cc", "memory");
#endif

	return (ret);
}

static __inline u_long
atomic_cmpset_long(volatile u_long *p, u_long cmpval, u_long newval)
{
	uint32_t	ret;

#ifdef __GNUCLIKE_ASM
	__asm __volatile (
		"1:\tlwarx %0, 0, %2\n\t"	/* load old value */
		"cmplw %3, %0\n\t"		/* compare */
		"bne 2f\n\t"			/* exit if not equal */
		"stwcx. %4, 0, %2\n\t"		/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		"li %0, 1\n\t"			/* success - retval = 1 */
		"b 3f\n\t"			/* we've succeeded */
		"2:\n\t"
		"stwcx. %0, 0, %2\n\t"		/* clear reservation (74xx) */
		"li %0, 0\n\t"			/* failure - retval = 0 */
		"3:\n\t"
		: "=&r" (ret), "=m" (*p)
		: "r" (p), "r" (cmpval), "r" (newval), "m" (*p)
		: "cc", "memory");
#endif

	return (ret);
}

#define	atomic_cmpset_int	atomic_cmpset_32

#define	atomic_cmpset_ptr(dst, old, new)	\
    atomic_cmpset_32((volatile u_int *)(dst), (u_int)(old), (u_int)(new))

static __inline uint32_t
atomic_cmpset_acq_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	int retval;

	retval = atomic_cmpset_32(p, cmpval, newval);
	__ATOMIC_BARRIER;
	return (retval);
}

static __inline uint32_t
atomic_cmpset_rel_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	__ATOMIC_BARRIER;
	return (atomic_cmpset_32(p, cmpval, newval));
}

static __inline u_long
atomic_cmpset_acq_long(volatile u_long *p, u_long cmpval, u_long newval)
{
	int retval;

	retval = atomic_cmpset_long(p, cmpval, newval);
	__ATOMIC_BARRIER;
	return (retval);
}

static __inline u_long
atomic_cmpset_rel_long(volatile u_long *p, u_long cmpval, u_long newval)
{
	__ATOMIC_BARRIER;
	return (atomic_cmpset_long(p, cmpval, newval));
}

#define	atomic_cmpset_acq_int	atomic_cmpset_acq_32
#define	atomic_cmpset_rel_int	atomic_cmpset_rel_32

#define	atomic_cmpset_acq_ptr(dst, old, new)	\
    atomic_cmpset_acq_32((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new)	\
    atomic_cmpset_rel_32((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
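
/*
 * Illustrative (not compiled) sketch of a simple test-and-set style spin
 * lock built from the cmpset and store_rel primitives above; "lock" is a
 * hypothetical variable:
 */
#if 0
static u_int lock;

static void
lock_acquire(void)
{
	while (atomic_cmpset_acq_int(&lock, 0, 1) == 0)
		;		/* spin until we flip 0 -> 1 */
}

static void
lock_release(void)
{
	atomic_store_rel_int(&lock, 0);
}
#endif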

static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
{
	uint32_t value;

	do {
		value = *p;
	} while (!atomic_cmpset_32(p, value, value + v));
	return (value);
}
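
/*
 * atomic_fetchadd_32() returns the value *p held before the addition, so
 * e.g. atomic_fetchadd_int(&ticket, 1) hands out unique, increasing ticket
 * numbers ("ticket" being a hypothetical u_int counter).
 */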

#define	atomic_fetchadd_int	atomic_fetchadd_32
#define	atomic_fetchadd_long(p, v)	\
    (u_long)atomic_fetchadd_32((volatile u_int *)(p), (u_int)(v))

#undef __ATOMIC_BARRIER

#endif /* ! _MACHINE_ATOMIC_H_ */