/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * To express interprocessor (as opposed to processor and device) memory
 * ordering constraints, use the atomic_*() functions with acquire and release
 * semantics rather than the *mb() functions.  An architecture's memory
 * ordering (or memory consistency) model governs the order in which a
 * program's accesses to different locations may be performed by an
 * implementation of that architecture.  In general, for memory regions
 * defined as writeback cacheable, the memory ordering implemented by amd64
 * processors preserves the program ordering of a load followed by a load, a
 * load followed by a store, and a store followed by a store.  Only a store
 * followed by a load to a different memory location may be reordered.
 * Therefore, except for special cases, like non-temporal memory accesses or
 * memory regions defined as write combining, the memory ordering effects
 * provided by the sfence instruction in the wmb() function and the lfence
 * instruction in the rmb() function are redundant.  In contrast, the
 * atomic_*() functions with acquire and release semantics do not perform
 * redundant instructions for ordinary cases of interprocessor memory
 * ordering on any architecture.
 */
#define	mb()	__asm __volatile("mfence;" : : : "memory")
#define	wmb()	__asm __volatile("sfence;" : : : "memory")
#define	rmb()	__asm __volatile("lfence;" : : : "memory")
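
/*
 * Example (an illustrative sketch, not part of this interface): passing
 * data between processors with a release/acquire pair instead of wmb()
 * and rmb().  The release store keeps the write of "data" visible before
 * the write of "ready"; the acquire load keeps the read of "ready" ahead
 * of the read of "data".  The variables and consume() are hypothetical;
 * cpu_spinwait() is assumed to be available from <machine/cpu.h>.
 *
 *	static u_int data, ready;
 *
 *	void
 *	producer(void)
 *	{
 *		data = 42;
 *		atomic_store_rel_int(&ready, 1);
 *	}
 *
 *	void
 *	consumer(void)
 *	{
 *		while (atomic_load_acq_int(&ready) == 0)
 *			cpu_spinwait();
 *		consume(data);
 *	}
 */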

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
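
/*
 * Example (an illustrative sketch): maintaining a word of flag bits that
 * several processors update concurrently.  Plain |= and &= would be
 * racy read-modify-write sequences; the atomic variants are not.  The
 * flag names and the variable are hypothetical.
 *
 *	#define	FLAG_BUSY	0x01
 *	#define	FLAG_DIRTY	0x02
 *
 *	static u_int flags;
 *
 *	atomic_set_int(&flags, FLAG_DIRTY);
 *	atomic_clear_int(&flags, FLAG_BUSY);
 */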

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src);
int	atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src);
int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
int	atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src);
int	atomic_fcmpset_short(volatile u_short *dst, u_short *expect,
	    u_short src);
int	atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
int	atomic_fcmpset_long(volatile u_long *dst, u_long *expect, u_long src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long	atomic_fetchadd_long(volatile u_long *p, u_long v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandset_long(volatile u_long *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
int	atomic_testandclear_long(volatile u_long *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized to avoid code chunk removal by the compiler.
 * GCC aggressively reorders operations and memory clobbering is necessary
 * in order to avoid that for memory barriers.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *	if (*dst == expect)
 *		*dst = src
 *
 * fcmpset:
 *	if (*dst == *expect)
 *		*dst = src
 *	else
 *		*expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
#define	ATOMIC_CMPSET(TYPE)				\
static __inline int					\
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg %3,%1 ;	"			\
	"	sete	%0 ;		"		\
	"# atomic_cmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (expect)			/* 2 */		\
	: "r" (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}							\
							\
static __inline int					\
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg %3,%1 ;		"		\
	"	sete	%0 ;		"		\
	"# atomic_fcmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (*expect)		/* 2 */		\
	: "r" (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}

ATOMIC_CMPSET(char);
ATOMIC_CMPSET(short);
ATOMIC_CMPSET(int);
ATOMIC_CMPSET(long);

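/*
 * Example (an illustrative sketch): the usual compare-and-set retry
 * loop.  On failure, fcmpset reloads *p into "old", so the loop does
 * not need to reread the destination itself.  The function is a
 * hypothetical saturating add, not part of this interface.
 *
 *	static __inline void
 *	atomic_add_sat_int(volatile u_int *p, u_int v, u_int limit)
 *	{
 *		u_int old, val;
 *
 *		old = *p;
 *		do {
 *			val = old + v;
 *			if (val > limit || val < old)
 *				val = limit;
 *		} while (atomic_fcmpset_int(p, &old, val) == 0);
 *	}
 */
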
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
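
/*
 * Example (an illustrative sketch): handing out unique, monotonically
 * increasing ticket numbers.  Because xadd returns the previous value,
 * every caller gets a distinct ticket with no retry loop.  The variable
 * and function are hypothetical.
 *
 *	static u_int next_ticket;
 *
 *	u_int
 *	take_ticket(void)
 *	{
 *		return (atomic_fetchadd_int(&next_ticket, 1));
 *	}
 */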

/*
 * Atomically add the value of v to the long integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddq	%0,%1 ;		"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsq	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_long"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrq	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_long"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}
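
/*
 * Example (an illustrative sketch): a toy spin lock built on bit 0 of a
 * word.  On amd64 the lock-prefixed bts gives the acquire side a full
 * barrier, and the release store publishes the critical section before
 * the lock is seen clear.  A real lock would add backoff and owner
 * tracking; the names are hypothetical, and cpu_spinwait() is assumed
 * to come from <machine/cpu.h>.
 *
 *	static u_int lock_word;
 *
 *	void
 *	toy_lock(void)
 *	{
 *		while (atomic_testandset_int(&lock_word, 0) != 0)
 *			cpu_spinwait();
 *	}
 *
 *	void
 *	toy_unlock(void)
 *	{
 *		atomic_store_rel_int(&lock_word, 0);
 *	}
 */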

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  To avoid false data dependencies, we use a
 * special address for "mem".  In the kernel, we use a private per-cpu
 * cache line.  In user space, we use a word in the stack's red zone
 * (-8(%rsp)).
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */

#if defined(_KERNEL)

/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in amd64/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x100

#if defined(SMP)
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,%%gs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
}
#else /* _KERNEL && UP */
static __inline void
__storeload_barrier(void)
{

	__compiler_membar();
}
#endif /* SMP */
#else /* !_KERNEL */
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
}
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}
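
/*
 * Example (an illustrative sketch): the store-load case that needs the
 * sequentially consistent fence.  With only acquire and release, each
 * thread's load could pass its own earlier store and both could observe
 * 0.  With the fences, r1 == 0 && r2 == 0 is impossible.  The flags and
 * result variables are hypothetical; initially f1 == f2 == 0.
 *
 *	Thread 1:				Thread 2:
 *	atomic_store_rel_int(&f1, 1);		atomic_store_rel_int(&f2, 1);
 *	atomic_thread_fence_seq_cst();		atomic_thread_fence_seq_cst();
 *	r1 = atomic_load_acq_int(&f2);		r2 = atomic_load_acq_int(&f1);
 */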

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)					\
	ATOMIC_LOAD(TYPE);					\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE
#ifndef WANT_FUNCTIONS

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_swap_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char
#define	atomic_cmpset_acq_char		atomic_cmpset_char
#define	atomic_cmpset_rel_char		atomic_cmpset_char
#define	atomic_fcmpset_acq_char		atomic_fcmpset_char
#define	atomic_fcmpset_rel_char		atomic_fcmpset_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short
#define	atomic_cmpset_acq_short		atomic_cmpset_short
#define	atomic_cmpset_rel_short		atomic_cmpset_short
#define	atomic_fcmpset_acq_short	atomic_fcmpset_short
#define	atomic_fcmpset_rel_short	atomic_fcmpset_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
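
/*
 * Example (an illustrative sketch): draining a word of pending-event
 * bits.  The swap atomically takes ownership of every bit posted so
 * far; events posted concurrently are left for the next pass.  The
 * variable and handle_event() are hypothetical; ffs() is assumed to be
 * available (libkern in the kernel, <strings.h> in userland).
 *
 *	static u_int pending;
 *
 *	void
 *	drain_events(void)
 *	{
 *		u_int ev;
 *
 *		ev = atomic_readandclear_int(&pending);
 *		while (ev != 0) {
 *			handle_event(ffs(ev) - 1);
 *			ev &= ev - 1;
 *		}
 *	}
 */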

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char
#define	atomic_cmpset_8		atomic_cmpset_char
#define	atomic_cmpset_acq_8	atomic_cmpset_acq_char
#define	atomic_cmpset_rel_8	atomic_cmpset_rel_char
#define	atomic_fcmpset_8	atomic_fcmpset_char
#define	atomic_fcmpset_acq_8	atomic_fcmpset_acq_char
#define	atomic_fcmpset_rel_8	atomic_fcmpset_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short
#define	atomic_cmpset_16	atomic_cmpset_short
#define	atomic_cmpset_acq_16	atomic_cmpset_acq_short
#define	atomic_cmpset_rel_16	atomic_cmpset_rel_short
#define	atomic_fcmpset_16	atomic_fcmpset_short
#define	atomic_fcmpset_acq_16	atomic_fcmpset_acq_short
#define	atomic_fcmpset_rel_16	atomic_fcmpset_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_fcmpset_64	atomic_fcmpset_long
#define	atomic_fcmpset_acq_64	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_64	atomic_fcmpset_rel_long
#define	atomic_swap_64		atomic_swap_long
#define	atomic_readandclear_64	atomic_readandclear_long
#define	atomic_fetchadd_64	atomic_fetchadd_long
#define	atomic_testandset_64	atomic_testandset_long
#define	atomic_testandclear_64	atomic_testandclear_long

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_fcmpset_ptr	atomic_fcmpset_long
#define	atomic_fcmpset_acq_ptr	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_ptr	atomic_fcmpset_rel_long
#define	atomic_swap_ptr		atomic_swap_long
#define	atomic_readandclear_ptr	atomic_readandclear_long

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */