xref: /freebsd/sys/amd64/include/atomic.h (revision 4b50c451720d8b427757a6da1dd2bb4c52cd9e35)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * To express interprocessor (as opposed to processor and device) memory
 * ordering constraints, use the atomic_*() functions with acquire and release
 * semantics rather than the *mb() functions.  An architecture's memory
 * ordering (or memory consistency) model governs the order in which a
 * program's accesses to different locations may be performed by an
 * implementation of that architecture.  In general, for memory regions
 * defined as writeback cacheable, the memory ordering implemented by amd64
 * processors preserves the program ordering of a load followed by a load, a
 * load followed by a store, and a store followed by a store.  Only a store
 * followed by a load to a different memory location may be reordered.
 * Therefore, except for special cases, like non-temporal memory accesses or
 * memory regions defined as write combining, the memory ordering effects
 * provided by the sfence instruction in the wmb() function and the lfence
 * instruction in the rmb() function are redundant.  In contrast, the
 * atomic_*() functions with acquire and release semantics do not perform
 * redundant instructions for ordinary cases of interprocessor memory
 * ordering on any architecture.
 */
#define	mb()	__asm __volatile("mfence;" : : : "memory")
#define	wmb()	__asm __volatile("sfence;" : : : "memory")
#define	rmb()	__asm __volatile("lfence;" : : : "memory")
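
/*
 * For example, an interprocessor producer/consumer handoff needs none of
 * the *mb() macros; the acquire/release atomics defined below express the
 * ordering directly.  A minimal sketch (the data/flag variables and the
 * functions are hypothetical; compiled out, illustrative only):
 */
#if 0
static u_int data, flag;

static void
producer(void)
{

	data = 42;			/* plain store */
	atomic_store_rel_int(&flag, 1);	/* release: data store ordered first */
}

static void
consumer(void)
{

	while (atomic_load_acq_int(&flag) == 0)
		;			/* acquire: later loads ordered after */
	/* Here data is guaranteed to read as 42. */
}
#endif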

#ifdef _KERNEL
/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in amd64/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x100
#endif
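
/*
 * The check in amd64/vm_machdep.c is essentially a static assertion of
 * this form (a sketch; the exact message there may differ):
 *
 *	_Static_assert(OFFSETOF_MONITORBUF ==
 *	    offsetof(struct pcpu, pc_monitorbuf),
 *	    "OFFSETOF_MONITORBUF is out of sync with struct pcpu");
 */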

#if defined(KCSAN) && !defined(KCSAN_RUNTIME)
#include <sys/_cscan_atomic.h>
#else
#include <sys/atomic_common.h>

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

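/*
 * For instance, a shared word of flag bits can be maintained without a
 * lock.  A sketch (the flag and variable names are hypothetical; compiled
 * out, illustrative only):
 */
#if 0
#define	WORKER_BUSY	0x01

static u_int worker_flags;

static void
worker_enter(void)
{

	atomic_set_int(&worker_flags, WORKER_BUSY);	/* flags |= BUSY */
}

static void
worker_leave(void)
{

	atomic_clear_int(&worker_flags, WORKER_BUSY);	/* flags &= ~BUSY */
}
#endif
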
/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src);
int	atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src);
int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
int	atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src);
int	atomic_fcmpset_short(volatile u_short *dst, u_short *expect,
	    u_short src);
int	atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
int	atomic_fcmpset_long(volatile u_long *dst, u_long *expect, u_long src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long	atomic_fetchadd_long(volatile u_long *p, u_long v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandset_long(volatile u_long *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
int	atomic_testandclear_long(volatile u_long *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL) || defined(KLD_MODULE)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized so that the compiler cannot remove the
 * code as dead.  GCC also aggressively reorders operations, so a memory
 * clobber is needed on the variants that serve as memory barriers.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *	if (*dst == expect)
 *		*dst = src
 *
 * fcmpset:
 *	if (*dst == *expect)
 *		*dst = src
 *	else
 *		*expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
#define	ATOMIC_CMPSET(TYPE)				\
static __inline int					\
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg %3,%1 ;	"			\
	"	sete	%0 ;		"		\
	"# atomic_cmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (expect)			/* 2 */		\
	: "r" (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}							\
							\
static __inline int					\
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg %3,%1 ;		"		\
	"	sete	%0 ;		"		\
	"# atomic_fcmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (*expect)		/* 2 */		\
	: "r" (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}

ATOMIC_CMPSET(char);
ATOMIC_CMPSET(short);
ATOMIC_CMPSET(int);
ATOMIC_CMPSET(long);

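/*
 * A typical lock-free update loop prefers fcmpset: on failure, cmpxchg
 * has already fetched the current value into *expect, so the loop does
 * not need to reload *dst by hand.  A minimal sketch (the helper is
 * hypothetical; compiled out, illustrative only):
 */
#if 0
static __inline void
counter_inc_saturating(volatile u_int *p)
{
	u_int old;

	old = *p;
	do {
		if (old == (u_int)-1)
			return;		/* already saturated */
	} while (!atomic_fcmpset_int(p, &old, old + 1));
}
#endif
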
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
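
/*
 * Because the pre-increment value is returned, fetchadd is the natural
 * primitive for ticket and event counters.  A sketch (the counter and
 * helper are hypothetical; compiled out, illustrative only):
 */
#if 0
static volatile u_int next_ticket;

static __inline u_int
ticket_take(void)
{

	return (atomic_fetchadd_int(&next_ticket, 1));
}
#endif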

/*
 * Atomically add the value of v to the long integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddq	%0,%1 ;		"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsq	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_long"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrq	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_long"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

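/*
 * These return the prior state of the bit, so a single bit can serve as
 * a try-lock.  A sketch (the word and helper are hypothetical; compiled
 * out, illustrative only):
 */
#if 0
static volatile u_int lockword;

static __inline int
bit0_trylock(void)
{

	/* Old bit was 0 -> we just set it and now own the lock. */
	return (atomic_testandset_int(&lockword, 0) == 0);
}
#endif
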
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  To avoid false data dependencies, we use a
 * special address for "mem".  In the kernel, we use a private per-cpu
 * cache line.  In user space, we use a word in the stack's red zone
 * (-8(%rsp)).
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */

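/*
 * The classic pattern that needs the Store/Load barrier is Dekker-style
 * flagging: each CPU stores its own flag and then loads the peer's.
 * Without a seq_cst fence the load may be satisfied before the store is
 * globally visible, and both CPUs can read 0.  A sketch (the flags and
 * function are hypothetical; CPU 1 runs the mirror image with the flags
 * swapped; compiled out, illustrative only):
 */
#if 0
static volatile u_int flag0, flag1;

static void
cpu0_try_enter(void)
{

	flag0 = 1;
	atomic_thread_fence_seq_cst();	/* order the store before the load */
	if (flag1 == 0) {
		/* Safe: CPU 1 must observe flag0 != 0 before entering. */
	}
}
#endif
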
#if defined(_KERNEL)

#if defined(SMP) || defined(KLD_MODULE)
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,%%gs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
}
#else /* _KERNEL && UP */
static __inline void
__storeload_barrier(void)
{

	__compiler_membar();
}
#endif /* SMP */
#else /* !_KERNEL */
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
}
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "er",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "er", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "er",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "er",  v);

#define	ATOMIC_LOADSTORE(TYPE)					\
	ATOMIC_LOAD(TYPE);					\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE
#ifndef WANT_FUNCTIONS

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_swap_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}
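
/*
 * xchg with a memory operand is implicitly locked, which makes swap the
 * cheapest way to build a simple test-and-set spinlock.  A sketch (the
 * lock word and helpers are hypothetical; the release side assumes
 * atomic_store_rel_int from above; compiled out, illustrative only):
 */
#if 0
static volatile u_int slock;

static __inline void
slock_acquire(void)
{

	while (atomic_swap_int(&slock, 1) != 0)
		;	/* spin until the previous owner stored 0 */
}

static __inline void
slock_release(void)
{

	atomic_store_rel_int(&slock, 0);
}
#endif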

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char
#define	atomic_cmpset_acq_char		atomic_cmpset_char
#define	atomic_cmpset_rel_char		atomic_cmpset_char
#define	atomic_fcmpset_acq_char		atomic_fcmpset_char
#define	atomic_fcmpset_rel_char		atomic_fcmpset_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short
#define	atomic_cmpset_acq_short		atomic_cmpset_short
#define	atomic_cmpset_rel_short		atomic_cmpset_short
#define	atomic_fcmpset_acq_short	atomic_fcmpset_short
#define	atomic_fcmpset_rel_short	atomic_fcmpset_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char
#define	atomic_cmpset_8		atomic_cmpset_char
#define	atomic_cmpset_acq_8	atomic_cmpset_acq_char
#define	atomic_cmpset_rel_8	atomic_cmpset_rel_char
#define	atomic_fcmpset_8	atomic_fcmpset_char
#define	atomic_fcmpset_acq_8	atomic_fcmpset_acq_char
#define	atomic_fcmpset_rel_8	atomic_fcmpset_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short
#define	atomic_cmpset_16	atomic_cmpset_short
#define	atomic_cmpset_acq_16	atomic_cmpset_acq_short
#define	atomic_cmpset_rel_16	atomic_cmpset_rel_short
#define	atomic_fcmpset_16	atomic_fcmpset_short
#define	atomic_fcmpset_acq_16	atomic_fcmpset_acq_short
#define	atomic_fcmpset_rel_16	atomic_fcmpset_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_fcmpset_64	atomic_fcmpset_long
#define	atomic_fcmpset_acq_64	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_64	atomic_fcmpset_rel_long
#define	atomic_swap_64		atomic_swap_long
#define	atomic_readandclear_64	atomic_readandclear_long
#define	atomic_fetchadd_64	atomic_fetchadd_long
#define	atomic_testandset_64	atomic_testandset_long
#define	atomic_testandclear_64	atomic_testandclear_long

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_fcmpset_ptr	atomic_fcmpset_long
#define	atomic_fcmpset_acq_ptr	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_ptr	atomic_fcmpset_rel_long
#define	atomic_swap_ptr		atomic_swap_long
#define	atomic_readandclear_ptr	atomic_readandclear_long

#endif /* !WANT_FUNCTIONS */

#endif /* KCSAN && !KCSAN_RUNTIME */

#endif /* !_MACHINE_ATOMIC_H_ */