/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * To express interprocessor (as opposed to processor and device) memory
 * ordering constraints, use the atomic_*() functions with acquire and release
 * semantics rather than the *mb() functions.  An architecture's memory
 * ordering (or memory consistency) model governs the order in which a
 * program's accesses to different locations may be performed by an
 * implementation of that architecture.  In general, for memory regions
 * defined as writeback cacheable, the memory ordering implemented by amd64
 * processors preserves the program ordering of a load followed by a load, a
 * load followed by a store, and a store followed by a store.  Only a store
 * followed by a load to a different memory location may be reordered.
 * Therefore, except for special cases, like non-temporal memory accesses or
 * memory regions defined as write combining, the memory ordering effects
 * provided by the sfence instruction in the wmb() function and the lfence
 * instruction in the rmb() function are redundant.  In contrast, the
 * atomic_*() functions with acquire and release semantics do not perform
 * redundant instructions for ordinary cases of interprocessor memory
 * ordering on any architecture.
 */
#define	mb()	__asm __volatile("mfence;" : : : "memory")
#define	wmb()	__asm __volatile("sfence;" : : : "memory")
#define	rmb()	__asm __volatile("lfence;" : : : "memory")
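
/*
 * Illustrative sketch (hypothetical names, not part of this header):
 * a producer/consumer handoff is fully ordered by acquire and release
 * semantics alone, with no *mb() barrier required:
 *
 *	// Producer:
 *	data = compute();
 *	atomic_store_rel_int(&ready, 1);  // prior stores visible first
 *
 *	// Consumer:
 *	while (atomic_load_acq_int(&ready) == 0)
 *		cpu_spinwait();
 *	use(data);			  // ordered after the load of ready
 */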

#ifdef _KERNEL
/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in amd64/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x100
#endif

#ifndef SAN_RUNTIME
#if defined(KASAN)
#define	ATOMIC_SAN_PREFIX	kasan
#elif defined(KCSAN)
#define	ATOMIC_SAN_PREFIX	kcsan
#endif
#endif

#ifdef ATOMIC_SAN_PREFIX
#include <sys/atomic_san.h>
#else
#include <sys/atomic_common.h>

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src);
int	atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src);
int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
int	atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src);
int	atomic_fcmpset_short(volatile u_short *dst, u_short *expect,
	    u_short src);
int	atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
int	atomic_fcmpset_long(volatile u_long *dst, u_long *expect, u_long src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long	atomic_fetchadd_long(volatile u_long *p, u_long v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandset_long(volatile u_long *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
int	atomic_testandclear_long(volatile u_long *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL) || defined(KLD_MODULE)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized to avoid code chunk removal by the compiler.
 * GCC aggressively reorders operations and memory clobbering is necessary
 * in order to avoid that for memory barriers.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

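/*
 * The trailing "struct __hack" lets each ATOMIC_ASM() invocation end
 * with a semicolon.  For illustration (hypothetical expansion, SMP
 * kernel), ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) produces:
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; addl %1,%0"
 *		: "+m" (*p)
 *		: "ir" (v)
 *		: "cc");
 *	}
 *
 * plus atomic_add_barr_int(), which is identical except for an added
 * "memory" clobber that orders surrounding compiler-visible accesses.
 */
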
/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *	if (*dst == expect)
 *		*dst = src
 *
 * fcmpset:
 *	if (*dst == *expect)
 *		*dst = src
 *	else
 *		*expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
#define	ATOMIC_CMPSET(TYPE)				\
static __inline int					\
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg %3,%1 ;	"			\
	"# atomic_cmpset_" #TYPE "	"		\
	: "=@cce" (res),		/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (expect)			/* 2 */		\
	: "r" (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}							\
							\
static __inline int					\
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg %3,%1 ;		"		\
	"# atomic_fcmpset_" #TYPE "	"		\
	: "=@cce" (res),		/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (*expect)		/* 2 */		\
	: "r" (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}

ATOMIC_CMPSET(char);
ATOMIC_CMPSET(short);
ATOMIC_CMPSET(int);
ATOMIC_CMPSET(long);

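/*
 * Illustrative sketch (hypothetical names, not part of this header):
 * a bounded increment using atomic_fcmpset_int().  On failure,
 * fcmpset writes the now-current value of *dst back into *expect,
 * so the retry loop needs no explicit re-read:
 *
 *	u_int old;
 *
 *	old = atomic_load_acq_int(&counter);
 *	do {
 *		if (old == limit)
 *			break;
 *	} while (atomic_fcmpset_int(&counter, &old, old + 1) == 0);
 */
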
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

/*
 * Atomically add the value of v to the long integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddq	%0,%1 ;		"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

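/*
 * Illustrative sketch (hypothetical names): because fetchadd returns
 * the value *p held before the addition, it can hand out unique,
 * monotonically increasing tickets:
 *
 *	u_int my_ticket;
 *
 *	my_ticket = atomic_fetchadd_int(&next_ticket, 1);
 */
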
static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"# atomic_testandset_int"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsq	%2,%1 ;		"
	"# atomic_testandset_long"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"# atomic_testandclear_int"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrq	%2,%1 ;		"
	"# atomic_testandclear_long"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

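/*
 * Illustrative sketch (hypothetical names): the test-and-set family
 * returns the prior value of the selected bit, e.g.:
 *
 *	if (atomic_testandset_int(&flags, 3) == 0) {
 *		// Bit 3 was clear and this caller set it.
 *	}
 *
 * Bit numbers are taken modulo the operand width, as the constraints
 * above show (v & 0x1f for _int, v & 0x3f for _long).
 */
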
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  To avoid false data dependencies, we use a
 * special address for "mem".  In the kernel, we use a private per-cpu
 * cache line.  In user space, we use a word in the stack's red zone
 * (-8(%rsp)).
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */

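/*
 * Illustrative sketch (hypothetical names): the classic Store/Load
 * pattern that requires a sequentially consistent fence.  Each CPU
 * publishes its flag and must then observe the other's store:
 *
 *	atomic_store_rel_int(&my_flag, 1);
 *	atomic_thread_fence_seq_cst();	// Store/Load barrier
 *	if (atomic_load_acq_int(&other_flag) == 0) {
 *		// The other CPU has not entered yet.
 *	}
 *
 * Without the fence, the load of other_flag could be satisfied before
 * the store to my_flag becomes visible, letting both CPUs observe 0.
 */
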
#if defined(_KERNEL)

#if defined(SMP) || defined(KLD_MODULE)
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,%%gs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
}
#else /* _KERNEL && UP */
static __inline void
__storeload_barrier(void)
{

	__compiler_membar();
}
#endif /* SMP */
#else /* !_KERNEL */
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
}
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "er",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "er", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "er",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "er",  v);

#define	ATOMIC_LOADSTORE(TYPE)					\
	ATOMIC_LOAD(TYPE);					\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE
#ifndef WANT_FUNCTIONS

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

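/*
 * No lock prefix is needed here: on x86, xchg with a memory operand
 * is implicitly locked.
 */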
static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_swap_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char
#define	atomic_cmpset_acq_char		atomic_cmpset_char
#define	atomic_cmpset_rel_char		atomic_cmpset_char
#define	atomic_fcmpset_acq_char		atomic_fcmpset_char
#define	atomic_fcmpset_rel_char		atomic_fcmpset_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short
#define	atomic_cmpset_acq_short		atomic_cmpset_short
#define	atomic_cmpset_rel_short		atomic_cmpset_short
#define	atomic_fcmpset_acq_short	atomic_fcmpset_short
#define	atomic_fcmpset_rel_short	atomic_fcmpset_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
#define	atomic_testandset_acq_long	atomic_testandset_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char
#define	atomic_cmpset_8		atomic_cmpset_char
#define	atomic_cmpset_acq_8	atomic_cmpset_acq_char
#define	atomic_cmpset_rel_8	atomic_cmpset_rel_char
#define	atomic_fcmpset_8	atomic_fcmpset_char
#define	atomic_fcmpset_acq_8	atomic_fcmpset_acq_char
#define	atomic_fcmpset_rel_8	atomic_fcmpset_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short
#define	atomic_cmpset_16	atomic_cmpset_short
#define	atomic_cmpset_acq_16	atomic_cmpset_acq_short
#define	atomic_cmpset_rel_16	atomic_cmpset_rel_short
#define	atomic_fcmpset_16	atomic_fcmpset_short
#define	atomic_fcmpset_acq_16	atomic_fcmpset_acq_short
#define	atomic_fcmpset_rel_16	atomic_fcmpset_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_fcmpset_64	atomic_fcmpset_long
#define	atomic_fcmpset_acq_64	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_64	atomic_fcmpset_rel_long
#define	atomic_swap_64		atomic_swap_long
#define	atomic_readandclear_64	atomic_readandclear_long
#define	atomic_fetchadd_64	atomic_fetchadd_long
#define	atomic_testandset_64	atomic_testandset_long
#define	atomic_testandclear_64	atomic_testandclear_long

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_fcmpset_ptr	atomic_fcmpset_long
#define	atomic_fcmpset_acq_ptr	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_ptr	atomic_fcmpset_rel_long
#define	atomic_swap_ptr		atomic_swap_long
#define	atomic_readandclear_ptr	atomic_readandclear_long

#endif /* !WANT_FUNCTIONS */

#endif /* !ATOMIC_SAN_PREFIX */

#endif /* !_MACHINE_ATOMIC_H_ */