/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * To express interprocessor (as opposed to processor and device) memory
 * ordering constraints, use the atomic_*() functions with acquire and release
 * semantics rather than the *mb() functions.  An architecture's memory
 * ordering (or memory consistency) model governs the order in which a
 * program's accesses to different locations may be performed by an
 * implementation of that architecture.  In general, for memory regions
 * defined as writeback cacheable, the memory ordering implemented by amd64
 * processors preserves the program ordering of a load followed by a load, a
 * load followed by a store, and a store followed by a store.  Only a store
 * followed by a load to a different memory location may be reordered.
 * Therefore, except for special cases, like non-temporal memory accesses or
 * memory regions defined as write combining, the memory ordering effects
 * provided by the sfence instruction in the wmb() function and the lfence
 * instruction in the rmb() function are redundant.  In contrast, the
 * atomic_*() functions with acquire and release semantics do not perform
 * redundant instructions for ordinary cases of interprocessor memory
 * ordering on any architecture.
 */
#define	mb()	__asm __volatile("mfence;" : : : "memory")
#define	wmb()	__asm __volatile("sfence;" : : : "memory")
#define	rmb()	__asm __volatile("lfence;" : : : "memory")
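
/*
 * Illustrative sketch (the variables "data" and "ready" are hypothetical):
 * a producer publishes data with a release store and a consumer picks it up
 * with an acquire load; no wmb()/rmb() is needed:
 *
 *	producer:	data = 42;
 *			atomic_store_rel_int(&ready, 1);
 *
 *	consumer:	while (atomic_load_acq_int(&ready) == 0)
 *				cpu_spinwait();
 *			(a read of "data" now observes 42)
 *
 * The release store orders the write to "data" before the write to "ready";
 * the acquire load orders the read of "ready" before the later read of
 * "data".
 */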

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long	atomic_fetchadd_long(volatile u_long *p, u_long v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandset_long(volatile u_long *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
int	atomic_testandclear_long(volatile u_long *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized so that the compiler cannot elide or move it.
 * GCC also aggressively reorders operations around asm statements, so the
 * barrier variants carry a "memory" clobber to prevent such reordering
 * across them.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

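/*
 * For illustration, ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) (one of the
 * invocations below) expands, modulo whitespace, to:
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile(MPLOCKED "addl %1,%0"
 *		: "+m" (*p)
 *		: "ir" (v)
 *		: "cc");
 *	}
 *
 * plus an identical atomic_add_barr_int() that additionally clobbers
 * "memory".
 */
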
/*
 * Atomic compare and set, used by the mutex functions.
 *
 * Atomically performs: if (*dst == expect) *dst = src
 * (32-bit and 64-bit variants below).
 *
 * Returns 0 on failure, non-zero on success.
 */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"       sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgq %3,%1 ;	"
	"       sete	%0 ;		"
	"# atomic_cmpset_long"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

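/*
 * Minimal sketch of a typical caller ("lockp" and "FLAG" are hypothetical):
 * retry the compare-and-set until it succeeds, so the flag bit is set only
 * relative to the value that was actually observed:
 *
 *	u_int old, new;
 *
 *	do {
 *		old = *lockp;
 *		new = old | FLAG;
 *	} while (atomic_cmpset_acq_int(lockp, old, new) == 0);
 */
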
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

/*
 * Atomically add the value of v to the long integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddq	%0,%1 ;		"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

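/*
 * Illustrative use (the "next_ticket" counter is hypothetical): because the
 * previous value is returned, fetchadd can hand out unique tickets or
 * maintain reference counts:
 *
 *	my_ticket = atomic_fetchadd_int(&next_ticket, 1);
 */
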
static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsq	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_long"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrq	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_long"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

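/*
 * Illustrative use (the "busy_map" bitmap and "slot" index are hypothetical):
 * the return value is the previous state of the bit, so a slot can be
 * claimed atomically; the bit index is taken modulo 32 (int) or 64 (long):
 *
 *	if (atomic_testandset_long(&busy_map, slot) == 0)
 *		(this caller now owns "slot");
 */
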
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  To avoid false data dependencies, we use a
 * special address for "mem".  In the kernel, we use a private per-cpu
 * cache line.  In user space, we use a word in the stack's red zone
 * (-8(%rsp)).
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */
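
/*
 * Illustrative sketch (the flags are hypothetical): a store followed by a
 * load of a different location, as in a Dekker-style handshake, is the one
 * case that needs the Store/Load barrier supplied by
 * atomic_thread_fence_seq_cst():
 *
 *	atomic_store_rel_int(&my_flag, 1);
 *	atomic_thread_fence_seq_cst();
 *	if (atomic_load_acq_int(&other_flag) == 0)
 *		(the other thread has not yet raised its flag)
 */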

#if defined(_KERNEL)

/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in amd64/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x180

#if defined(SMP)
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,%%gs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
}
#else /* _KERNEL && UP */
static __inline void
__storeload_barrier(void)
{

	__compiler_membar();
}
#endif /* SMP */
#else /* !_KERNEL */
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
}
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)					\
	ATOMIC_LOAD(TYPE);					\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE
#ifndef WANT_FUNCTIONS

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_swap_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
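
/*
 * Illustrative use (the "pending" word is hypothetical): readandclear
 * atomically harvests accumulated flag bits and resets the word in one step:
 *
 *	events = atomic_readandclear_int(&pending);
 */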

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_swap_64		atomic_swap_long
#define	atomic_readandclear_64	atomic_readandclear_long
#define	atomic_fetchadd_64	atomic_fetchadd_long
#define	atomic_testandset_64	atomic_testandset_long
#define	atomic_testandclear_64	atomic_testandclear_long

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_swap_ptr		atomic_swap_long
#define	atomic_readandclear_ptr	atomic_readandclear_long

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */