/*-
 * Copyright (c) 2008 Marcel Moolenaar
 * Copyright (c) 2001 Benno Rice
 * Copyright (c) 2001 David E. O'Brien
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#define	__ATOMIC_BARRIER					\
    __asm __volatile("sync" : : : "memory")

#define	mb()	__ATOMIC_BARRIER
#define	wmb()	mb()
#define	rmb()	mb()

/*
 * atomic_add(p, v)
 * { *p += v; }
 */

#define	__ATOMIC_ADD_8(p, v, t)					\
    8-bit atomic_add not implemented

#define	__ATOMIC_ADD_16(p, v, t)				\
    16-bit atomic_add not implemented

#define	__ATOMIC_ADD_32(p, v, t)				\
    __asm __volatile(						\
	"1:	lwarx	%0, 0, %2\n"				\
	"	add	%0, %3, %0\n"				\
	"	stwcx.	%0, 0, %2\n"				\
	"	bne-	1b\n"					\
	: "=&r" (t), "=m" (*p)					\
	: "r" (p), "r" (v), "m" (*p)				\
	: "cc", "memory")					\
    /* __ATOMIC_ADD_32 */

#define	__ATOMIC_ADD_64(p, v, t)				\
    64-bit atomic_add not implemented

#define	_ATOMIC_ADD(width, suffix, type)			\
    static __inline void					\
    atomic_add_##suffix(volatile type *p, type v) {		\
	type t;							\
	__ATOMIC_ADD_##width(p, v, t);				\
    }								\
								\
    static __inline void					\
    atomic_add_acq_##suffix(volatile type *p, type v) {		\
	type t;							\
	__ATOMIC_ADD_##width(p, v, t);				\
	__ATOMIC_BARRIER;					\
    }								\
								\
    static __inline void					\
    atomic_add_rel_##suffix(volatile type *p, type v) {		\
	type t;							\
	__ATOMIC_BARRIER;					\
	__ATOMIC_ADD_##width(p, v, t);				\
    }								\
    /* _ATOMIC_ADD */

#if 0
_ATOMIC_ADD(8, 8, uint8_t)
_ATOMIC_ADD(8, char, u_char)
_ATOMIC_ADD(16, 16, uint16_t)
_ATOMIC_ADD(16, short, u_short)
#endif
_ATOMIC_ADD(32, 32, uint32_t)
_ATOMIC_ADD(32, int, u_int)
_ATOMIC_ADD(32, long, u_long)
_ATOMIC_ADD(32, ptr, uintptr_t)
#if 0
_ATOMIC_ADD(64, 64, uint64_t)
_ATOMIC_ADD(64, long_long, u_long_long)
#endif

#undef _ATOMIC_ADD
#undef __ATOMIC_ADD_64
#undef __ATOMIC_ADD_32
#undef __ATOMIC_ADD_16
#undef __ATOMIC_ADD_8

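/*
 * Example (an illustrative sketch, kept under #if 0 so it adds nothing
 * to the header): bumping a shared counter with the 32-bit add
 * primitive generated above.  The names `example_counter' and
 * `example_bump' are hypothetical.
 */
#if 0
static volatile u_int example_counter;

static void
example_bump(void)
{

	/* Equivalent to example_counter += 1, performed atomically. */
	atomic_add_int(&example_counter, 1);
}
#endif
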
/*
 * atomic_clear(p, v)
 * { *p &= ~v; }
 */

#define	__ATOMIC_CLEAR_8(p, v, t)				\
    8-bit atomic_clear not implemented

#define	__ATOMIC_CLEAR_16(p, v, t)				\
    16-bit atomic_clear not implemented

#define	__ATOMIC_CLEAR_32(p, v, t)				\
    __asm __volatile(						\
	"1:	lwarx	%0, 0, %2\n"				\
	"	andc	%0, %0, %3\n"				\
	"	stwcx.	%0, 0, %2\n"				\
	"	bne-	1b\n"					\
	: "=&r" (t), "=m" (*p)					\
	: "r" (p), "r" (v), "m" (*p)				\
	: "cc", "memory")					\
    /* __ATOMIC_CLEAR_32 */

#define	__ATOMIC_CLEAR_64(p, v, t)				\
    64-bit atomic_clear not implemented

#define	_ATOMIC_CLEAR(width, suffix, type)			\
    static __inline void					\
    atomic_clear_##suffix(volatile type *p, type v) {		\
	type t;							\
	__ATOMIC_CLEAR_##width(p, v, t);			\
    }								\
								\
    static __inline void					\
    atomic_clear_acq_##suffix(volatile type *p, type v) {	\
	type t;							\
	__ATOMIC_CLEAR_##width(p, v, t);			\
	__ATOMIC_BARRIER;					\
    }								\
								\
    static __inline void					\
    atomic_clear_rel_##suffix(volatile type *p, type v) {	\
	type t;							\
	__ATOMIC_BARRIER;					\
	__ATOMIC_CLEAR_##width(p, v, t);			\
    }								\
    /* _ATOMIC_CLEAR */

#if 0
_ATOMIC_CLEAR(8, 8, uint8_t)
_ATOMIC_CLEAR(8, char, u_char)
_ATOMIC_CLEAR(16, 16, uint16_t)
_ATOMIC_CLEAR(16, short, u_short)
#endif
_ATOMIC_CLEAR(32, 32, uint32_t)
_ATOMIC_CLEAR(32, int, u_int)
_ATOMIC_CLEAR(32, long, u_long)
_ATOMIC_CLEAR(32, ptr, uintptr_t)
#if 0
_ATOMIC_CLEAR(64, 64, uint64_t)
_ATOMIC_CLEAR(64, long_long, u_long_long)
#endif

#undef _ATOMIC_CLEAR
#undef __ATOMIC_CLEAR_64
#undef __ATOMIC_CLEAR_32
#undef __ATOMIC_CLEAR_16
#undef __ATOMIC_CLEAR_8

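/*
 * Example (an illustrative sketch, not compiled): atomically clearing
 * a flag bit.  `EXAMPLE_FLAG_BUSY' and `example_flags' are
 * hypothetical names.
 */
#if 0
#define	EXAMPLE_FLAG_BUSY	0x0001

static volatile u_int example_flags;

static void
example_unbusy(void)
{

	/* Equivalent to example_flags &= ~EXAMPLE_FLAG_BUSY, atomically. */
	atomic_clear_int(&example_flags, EXAMPLE_FLAG_BUSY);
}
#endif
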
/*
 * atomic_cmpset(p, o, n)
 */
/* TODO -- see below */

/*
 * atomic_load_acq(p)
 */
/* TODO -- see below */

/*
 * atomic_readandclear(p)
 */
/* TODO -- see below */

/*
 * atomic_set(p, v)
 * { *p |= v; }
 */

#define	__ATOMIC_SET_8(p, v, t)					\
    8-bit atomic_set not implemented

#define	__ATOMIC_SET_16(p, v, t)				\
    16-bit atomic_set not implemented

#define	__ATOMIC_SET_32(p, v, t)				\
    __asm __volatile(						\
	"1:	lwarx	%0, 0, %2\n"				\
	"	or	%0, %3, %0\n"				\
	"	stwcx.	%0, 0, %2\n"				\
	"	bne-	1b\n"					\
	: "=&r" (t), "=m" (*p)					\
	: "r" (p), "r" (v), "m" (*p)				\
	: "cc", "memory")					\
    /* __ATOMIC_SET_32 */

#define	__ATOMIC_SET_64(p, v, t)				\
    64-bit atomic_set not implemented

#define	_ATOMIC_SET(width, suffix, type)			\
    static __inline void					\
    atomic_set_##suffix(volatile type *p, type v) {		\
	type t;							\
	__ATOMIC_SET_##width(p, v, t);				\
    }								\
								\
    static __inline void					\
    atomic_set_acq_##suffix(volatile type *p, type v) {		\
	type t;							\
	__ATOMIC_SET_##width(p, v, t);				\
	__ATOMIC_BARRIER;					\
    }								\
								\
    static __inline void					\
    atomic_set_rel_##suffix(volatile type *p, type v) {		\
	type t;							\
	__ATOMIC_BARRIER;					\
	__ATOMIC_SET_##width(p, v, t);				\
    }								\
    /* _ATOMIC_SET */

#if 0
_ATOMIC_SET(8, 8, uint8_t)
_ATOMIC_SET(8, char, u_char)
_ATOMIC_SET(16, 16, uint16_t)
_ATOMIC_SET(16, short, u_short)
#endif
_ATOMIC_SET(32, 32, uint32_t)
_ATOMIC_SET(32, int, u_int)
_ATOMIC_SET(32, long, u_long)
_ATOMIC_SET(32, ptr, uintptr_t)
#if 0
_ATOMIC_SET(64, 64, uint64_t)
_ATOMIC_SET(64, long_long, u_long_long)
#endif

#undef _ATOMIC_SET
#undef __ATOMIC_SET_64
#undef __ATOMIC_SET_32
#undef __ATOMIC_SET_16
#undef __ATOMIC_SET_8

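/*
 * Example (an illustrative sketch, not compiled): atomically setting a
 * flag bit; the rel variant orders all earlier stores before the flag
 * becomes visible.  `EXAMPLE_FLAG_DONE' and `example_state' are
 * hypothetical names.
 */
#if 0
#define	EXAMPLE_FLAG_DONE	0x0002

static volatile u_int example_state;

static void
example_finish(void)
{

	/* Equivalent to example_state |= EXAMPLE_FLAG_DONE, atomically. */
	atomic_set_rel_int(&example_state, EXAMPLE_FLAG_DONE);
}
#endif
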
/*
 * atomic_subtract(p, v)
 * { *p -= v; }
 */

#define	__ATOMIC_SUBTRACT_8(p, v, t)				\
    8-bit atomic_subtract not implemented

#define	__ATOMIC_SUBTRACT_16(p, v, t)				\
    16-bit atomic_subtract not implemented

#define	__ATOMIC_SUBTRACT_32(p, v, t)				\
    __asm __volatile(						\
	"1:	lwarx	%0, 0, %2\n"				\
	"	subf	%0, %3, %0\n"				\
	"	stwcx.	%0, 0, %2\n"				\
	"	bne-	1b\n"					\
	: "=&r" (t), "=m" (*p)					\
	: "r" (p), "r" (v), "m" (*p)				\
	: "cc", "memory")					\
    /* __ATOMIC_SUBTRACT_32 */

#define	__ATOMIC_SUBTRACT_64(p, v, t)				\
    64-bit atomic_subtract not implemented

#define	_ATOMIC_SUBTRACT(width, suffix, type)			\
    static __inline void					\
    atomic_subtract_##suffix(volatile type *p, type v) {	\
	type t;							\
	__ATOMIC_SUBTRACT_##width(p, v, t);			\
    }								\
								\
    static __inline void					\
    atomic_subtract_acq_##suffix(volatile type *p, type v) {	\
	type t;							\
	__ATOMIC_SUBTRACT_##width(p, v, t);			\
	__ATOMIC_BARRIER;					\
    }								\
								\
    static __inline void					\
    atomic_subtract_rel_##suffix(volatile type *p, type v) {	\
	type t;							\
	__ATOMIC_BARRIER;					\
	__ATOMIC_SUBTRACT_##width(p, v, t);			\
    }								\
    /* _ATOMIC_SUBTRACT */

#if 0
_ATOMIC_SUBTRACT(8, 8, uint8_t)
_ATOMIC_SUBTRACT(8, char, u_char)
_ATOMIC_SUBTRACT(16, 16, uint16_t)
_ATOMIC_SUBTRACT(16, short, u_short)
#endif
_ATOMIC_SUBTRACT(32, 32, uint32_t)
_ATOMIC_SUBTRACT(32, int, u_int)
_ATOMIC_SUBTRACT(32, long, u_long)
_ATOMIC_SUBTRACT(32, ptr, uintptr_t)
#if 0
_ATOMIC_SUBTRACT(64, 64, uint64_t)
_ATOMIC_SUBTRACT(64, long_long, u_long_long)
#endif

#undef _ATOMIC_SUBTRACT
#undef __ATOMIC_SUBTRACT_64
#undef __ATOMIC_SUBTRACT_32
#undef __ATOMIC_SUBTRACT_16
#undef __ATOMIC_SUBTRACT_8

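/*
 * Example (an illustrative sketch, not compiled): dropping a reference
 * count.  `example_refs' is a hypothetical name; note that these
 * primitives return void, so a caller that needs the previous value
 * should use atomic_fetchadd_32() (defined below) instead.
 */
#if 0
static volatile u_int example_refs;

static void
example_rele(void)
{

	atomic_subtract_int(&example_refs, 1);
}
#endif
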
/*
 * atomic_store_rel(p, v)
 */
/* TODO -- see below */

/*
 * Old/original implementations that still need revisiting.
 */

static __inline uint32_t
atomic_readandclear_32(volatile uint32_t *addr)
{
	uint32_t result, temp;

#ifdef __GNUCLIKE_ASM
	__asm __volatile (
		"\tsync\n"			/* drain writes */
		"1:\tlwarx %0, 0, %3\n\t"	/* load old value */
		"li %1, 0\n\t"			/* load new value */
		"stwcx. %1, 0, %3\n\t"		/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		: "=&r"(result), "=&r"(temp), "=m" (*addr)
		: "r" (addr), "m" (*addr)
		: "cc", "memory");
#endif

	return (result);
}

#define	atomic_readandclear_int		atomic_readandclear_32
#define	atomic_readandclear_long	atomic_readandclear_32
#define	atomic_readandclear_ptr		atomic_readandclear_32

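/*
 * Example (an illustrative sketch, not compiled): draining a word of
 * pending-event bits in a single atomic step.  `example_pending' is a
 * hypothetical name.
 */
#if 0
static volatile u_int example_pending;

static void
example_drain(void)
{
	u_int pending;

	/* Fetch all pending bits and reset the word to 0 atomically. */
	pending = atomic_readandclear_int(&example_pending);
	/* ... act on `pending'; new events can post concurrently ... */
}
#endif
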
/*
 * We assume that a = b will do atomic loads and stores.
 */
#define	ATOMIC_STORE_LOAD(TYPE, WIDTH)				\
static __inline u_##TYPE					\
atomic_load_acq_##WIDTH(volatile u_##TYPE *p)			\
{								\
	u_##TYPE v;						\
								\
	v = *p;							\
	__ATOMIC_BARRIER;					\
	return (v);						\
}								\
								\
static __inline void						\
atomic_store_rel_##WIDTH(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
	__ATOMIC_BARRIER;					\
	*p = v;							\
}								\
								\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE v;						\
								\
	v = *p;							\
	__ATOMIC_BARRIER;					\
	return (v);						\
}								\
								\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
	__ATOMIC_BARRIER;					\
	*p = v;							\
}

ATOMIC_STORE_LOAD(char,		8)
ATOMIC_STORE_LOAD(short,	16)
ATOMIC_STORE_LOAD(int,		32)

#define	atomic_load_acq_long	atomic_load_acq_32
#define	atomic_store_rel_long	atomic_store_rel_32
#define	atomic_load_acq_ptr	atomic_load_acq_32
#define	atomic_store_rel_ptr	atomic_store_rel_32

#undef ATOMIC_STORE_LOAD

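/*
 * Example (an illustrative sketch, not compiled): a producer/consumer
 * hand-off built on the acquire load and release store above.  The
 * names `example_data' and `example_ready' are hypothetical.
 */
#if 0
static u_int example_data;
static volatile u_int example_ready;

static void
example_produce(void)
{

	example_data = 42;
	/* The sync in atomic_store_rel_int orders the data write first. */
	atomic_store_rel_int(&example_ready, 1);
}

static u_int
example_consume(void)
{

	while (atomic_load_acq_int(&example_ready) == 0)
		;	/* spin */
	/* The sync in atomic_load_acq_int makes the data write visible. */
	return (example_data);
}
#endif
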
/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline uint32_t
atomic_cmpset_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	uint32_t ret;

#ifdef __GNUCLIKE_ASM
	__asm __volatile (
		"1:\tlwarx %0, 0, %2\n\t"	/* load old value */
		"cmplw %3, %0\n\t"		/* compare */
		"bne 2f\n\t"			/* exit if not equal */
		"stwcx. %4, 0, %2\n\t"		/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		"li %0, 1\n\t"			/* success - retval = 1 */
		"b 3f\n\t"			/* we've succeeded */
		"2:\n\t"
		"stwcx. %0, 0, %2\n\t"		/* clear reservation (74xx) */
		"li %0, 0\n\t"			/* failure - retval = 0 */
		"3:\n\t"
		: "=&r" (ret), "=m" (*p)
		: "r" (p), "r" (cmpval), "r" (newval), "m" (*p)
		: "cc", "memory");
#endif

	return (ret);
}

static __inline u_long
atomic_cmpset_long(volatile u_long *p, u_long cmpval, u_long newval)
{
	u_long ret;

#ifdef __GNUCLIKE_ASM
	__asm __volatile (
		"1:\tlwarx %0, 0, %2\n\t"	/* load old value */
		"cmplw %3, %0\n\t"		/* compare */
		"bne 2f\n\t"			/* exit if not equal */
		"stwcx. %4, 0, %2\n\t"		/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		"li %0, 1\n\t"			/* success - retval = 1 */
		"b 3f\n\t"			/* we've succeeded */
		"2:\n\t"
		"stwcx. %0, 0, %2\n\t"		/* clear reservation (74xx) */
		"li %0, 0\n\t"			/* failure - retval = 0 */
		"3:\n\t"
		: "=&r" (ret), "=m" (*p)
		: "r" (p), "r" (cmpval), "r" (newval), "m" (*p)
		: "cc", "memory");
#endif

	return (ret);
}

#define	atomic_cmpset_int	atomic_cmpset_32

#define	atomic_cmpset_ptr(dst, old, new)	\
    atomic_cmpset_32((volatile u_int *)(dst), (u_int)(old), (u_int)(new))

static __inline uint32_t
atomic_cmpset_acq_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	uint32_t retval;

	retval = atomic_cmpset_32(p, cmpval, newval);
	__ATOMIC_BARRIER;
	return (retval);
}

static __inline uint32_t
atomic_cmpset_rel_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	__ATOMIC_BARRIER;
	return (atomic_cmpset_32(p, cmpval, newval));
}

static __inline u_long
atomic_cmpset_acq_long(volatile u_long *p, u_long cmpval, u_long newval)
{
	u_long retval;

	retval = atomic_cmpset_long(p, cmpval, newval);
	__ATOMIC_BARRIER;
	return (retval);
}

static __inline u_long
atomic_cmpset_rel_long(volatile u_long *p, u_long cmpval, u_long newval)
{
	__ATOMIC_BARRIER;
	return (atomic_cmpset_long(p, cmpval, newval));
}

#define	atomic_cmpset_acq_int	atomic_cmpset_acq_32
#define	atomic_cmpset_rel_int	atomic_cmpset_rel_32

#define	atomic_cmpset_acq_ptr(dst, old, new)	\
    atomic_cmpset_acq_32((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new)	\
    atomic_cmpset_rel_32((volatile u_int *)(dst), (u_int)(old), (u_int)(new))

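/*
 * Example (an illustrative sketch, not compiled): a minimal
 * test-and-set spin lock built on the cmpset primitives; real kernel
 * code should use mtx(9) instead.  `example_lock' is a hypothetical
 * name.
 */
#if 0
static volatile u_int example_lock;	/* 0 == free, 1 == held */

static void
example_lock_acquire(void)
{

	/* Succeeds only when the lock word changes from 0 to 1. */
	while (!atomic_cmpset_acq_int(&example_lock, 0, 1))
		;	/* spin */
}

static void
example_lock_release(void)
{

	atomic_store_rel_int(&example_lock, 0);
}
#endif
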
static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
{
	uint32_t value;

	do {
		value = *p;
	} while (!atomic_cmpset_32(p, value, value + v));
	return (value);
}

#define	atomic_fetchadd_int	atomic_fetchadd_32
#define	atomic_fetchadd_long(p, v)	\
    (u_long)atomic_fetchadd_32((volatile u_int *)(p), (u_int)(v))

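/*
 * Example (an illustrative sketch, not compiled): handing out unique
 * ticket numbers; each caller receives the pre-increment value.
 * `example_ticket' is a hypothetical name.
 */
#if 0
static volatile u_int example_ticket;

static u_int
example_next_ticket(void)
{

	/* Returns the old value; the stored value becomes old + 1. */
	return (atomic_fetchadd_int(&example_ticket, 1));
}
#endif
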
#endif /* ! _MACHINE_ATOMIC_H_ */