xref: /freebsd/sys/amd64/include/atomic.h (revision b775c213c272f55c4750338fd483275c408aab7a)
/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#define	mb()	__asm __volatile("mfence;" : : : "memory")
#define	wmb()	__asm __volatile("sfence;" : : : "memory")
#define	rmb()	__asm __volatile("lfence;" : : : "memory")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
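
/*
 * As an illustrative sketch of the operations above (not part of this
 * header; "foo_flags", "FOO_BUSY" and "foo_count" are hypothetical
 * names), a caller might maintain a flag word and an event counter:
 *
 *	static volatile u_int foo_flags, foo_count;
 *
 *	atomic_set_int(&foo_flags, FOO_BUSY);	set a bit:    *p |= V
 *	atomic_add_int(&foo_count, 1);		bump a count: *p += V
 *	atomic_clear_int(&foo_flags, FOO_BUSY);	clear a bit:  *p &= ~V
 */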

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long	atomic_fetchadd_long(volatile u_long *p, u_long v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandset_long(volatile u_long *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif
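
/*
 * For instance, with the definitions above, atomic_add_int() below
 * assembles to "lock ; addl %1,%0" on SMP kernels and in userland, but
 * to a plain "addl %1,%0" on UP kernels, where a single
 * read-modify-write instruction is already atomic with respect to
 * interrupts.
 */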

/*
 * The assembly is marked volatile so that the compiler cannot delete
 * it as dead code.  GCC also aggressively reorders operations, so the
 * barrier variants clobber memory to keep accesses from being moved
 * across them.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 *	if (*dst == expect)
 *		*dst = src
 *
 * atomic_cmpset_int() operates on 32-bit words and atomic_cmpset_long()
 * on 64-bit words.  Returns 0 on failure, non-zero on success.
 */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgq %3,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_long"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}
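
/*
 * A minimal spin-lock sketch built on cmpset (illustrative only; "lk"
 * is a hypothetical lock word, and real kernel code should use the
 * mutex(9) primitives instead):
 *
 *	static volatile u_int lk;	0 == free, 1 == held
 *
 *	while (atomic_cmpset_acq_int(&lk, 0, 1) == 0)
 *		;			spin until 0 -> 1 succeeds
 *	... critical section ...
 *	atomic_store_rel_int(&lk, 0);	release the lock
 */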

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

/*
 * Atomically add the value of v to the long integer pointed to by p and
 * return the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddq	%0,%1 ;		"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
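
/*
 * For example, fetchadd can hand out unique ticket numbers ("next_id"
 * is a hypothetical counter, not part of this header):
 *
 *	static volatile u_int next_id;
 *
 *	u_int id = atomic_fetchadd_int(&next_id, 1);
 *
 * Each caller receives the pre-increment value, so two concurrent
 * callers can never observe the same id.
 */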

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsq	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_long"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}
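
/*
 * A sketch of a test-and-set lock bit using the primitives above
 * (illustrative only; "busy" and the choice of bit 0 are hypothetical):
 *
 *	static volatile u_int busy;
 *
 *	while (atomic_testandset_int(&busy, 0) != 0)
 *		;			bit was already set; spin
 *	... owner of bit 0 ...
 *	atomic_clear_int(&busy, 1);	clear bit 0 to release
 *
 * Note that testandset takes a bit index, reduced modulo the word
 * width (the "v & 0x1f" above), while atomic_clear_int() takes a mask.
 */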

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * x86 memory model, which AMD64 retains, a simple store guarantees
 * release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so for atomic_load_acq we introduce a Store/Load barrier
 * before the load in SMP kernels.  We use "lock addl $0,mem", as
 * recommended by the AMD Software Optimization Guide, rather than
 * mfence.  In the kernel, the target of the locked addition is a
 * private per-CPU cache line, to avoid introducing false data
 * dependencies.  In userspace, a word in the red zone on the stack
 * (-8(%rsp)) is used instead.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */
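
/*
 * The classic store buffer litmus test shows why the Store/Load
 * barrier matters; with x and y both initially zero:
 *
 *	CPU 0				CPU 1
 *	atomic_store_rel_int(&x, 1);	atomic_store_rel_int(&y, 1);
 *	r0 = atomic_load_acq_int(&y);	r1 = atomic_load_acq_int(&x);
 *
 * Without a Store/Load barrier each load could pass the store on its
 * own CPU, allowing the outcome r0 == r1 == 0, which no interleaving
 * of the two program orders permits.
 */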

#if defined(_KERNEL)

/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in amd64/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x180

#if defined(SMP)
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,%%gs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
}
#else /* _KERNEL && UP */
static __inline void
__storeload_barrier(void)
{

	__compiler_membar();
}
#endif /* SMP */
#else /* !_KERNEL */
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
}
#endif /* _KERNEL */

/*
 * C11-standard acq/rel semantics only order accesses when the acquire
 * and the release operate on the same variable.  However, our previous
 * (x86) implementations provided much stronger ordering than required
 * (essentially what is called seq_cst order in C11).  This
 * implementation retains that historical strong ordering, since some
 * callers depend on it.
 */
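
/*
 * The canonical use of the acq/rel pair is a producer/consumer
 * hand-off ("data" and "ready" are hypothetical variables):
 *
 *	producer:
 *		data = 42;
 *		atomic_store_rel_int(&ready, 1);
 *
 *	consumer:
 *		while (atomic_load_acq_int(&ready) == 0)
 *			;
 *		now guaranteed to read data == 42
 *
 * The release store orders the write of data before the write of
 * ready; the acquire load orders the read of ready before the read
 * of data.
 */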

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	__storeload_barrier();					\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)					\
	ATOMIC_LOAD(TYPE);					\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE
#ifndef WANT_FUNCTIONS

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_swap_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
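
/*
 * readandclear is convenient for draining a word of pending-event bits
 * in one atomic step ("pending" is a hypothetical variable):
 *
 *	u_int ev;
 *
 *	ev = atomic_readandclear_int(&pending);
 *	handle every bit set in ev; events posted concurrently
 *	land in pending and are picked up on the next pass
 */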

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_swap_64		atomic_swap_long
#define	atomic_readandclear_64	atomic_readandclear_long
#define	atomic_testandset_64	atomic_testandset_long

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_swap_ptr		atomic_swap_long
#define	atomic_readandclear_ptr	atomic_readandclear_long

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */