/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifdef __i386__
#include <i386/atomic.h>
#else /* !__i386__ */

#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

/*
 * To express interprocessor (as opposed to processor and device) memory
 * ordering constraints, use the atomic_*() functions with acquire and release
 * semantics rather than the *mb() functions.  An architecture's memory
 * ordering (or memory consistency) model governs the order in which a
 * program's accesses to different locations may be performed by an
 * implementation of that architecture.  In general, for memory regions
 * defined as writeback cacheable, the memory ordering implemented by amd64
 * processors preserves the program ordering of a load followed by a load, a
 * load followed by a store, and a store followed by a store.  Only a store
 * followed by a load to a different memory location may be reordered.
 * Therefore, except for special cases, like non-temporal memory accesses or
 * memory regions defined as write combining, the memory ordering effects
 * provided by the sfence instruction in the wmb() function and the lfence
 * instruction in the rmb() function are redundant.  In contrast, the
 * atomic_*() functions with acquire and release semantics do not perform
 * redundant instructions for ordinary cases of interprocessor memory
 * ordering on any architecture.
 */
#define	mb()	__asm __volatile("mfence;" : : : "memory")
#define	wmb()	__asm __volatile("sfence;" : : : "memory")
#define	rmb()	__asm __volatile("lfence;" : : : "memory")
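
/*
 * For example, a producer/consumer handoff needs none of the *mb()
 * macros: release and acquire semantics already order the data
 * accesses on both CPUs.  A minimal sketch ("data", "ready", and
 * consume() are hypothetical):
 *
 *	static volatile u_int data, ready;
 *
 *	producer:
 *		data = 42;
 *		atomic_store_rel_int(&ready, 1);
 *
 *	consumer:
 *		while (atomic_load_acq_int(&ready) == 0)
 *			cpu_spinwait();
 *		consume(data);
 */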

#ifdef _KERNEL
/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in amd64/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x100
#endif

#if defined(SAN_NEEDS_INTERCEPTORS) && !defined(SAN_RUNTIME)
#include <sys/atomic_san.h>
#else
#include <sys/atomic_common.h>

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

/*
 * Always use lock prefixes.  The result is slightly less optimal for
 * UP systems, but it matters less now, and sometimes UP is emulated
 * over SMP.
 *
 * The assembly is volatilized to avoid code chunk removal by the compiler.
 * GCC aggressively reorders operations and memory clobbering is necessary
 * in order to avoid that for memory barriers.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("lock; " OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("lock; " OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack
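
/*
 * As an illustration, ATOMIC_ASM(set, int, "orl %1,%0", "ir", v),
 * instantiated below, expands to roughly:
 *
 *	static __inline void
 *	atomic_set_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock; orl %1,%0"
 *		: "+m" (*p) : "ir" (v) : "cc");
 *	}
 *
 * plus an atomic_set_barr_int() variant whose additional "memory"
 * clobber lets it stand in for the acquire/release aliases below.
 */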

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *	if (*dst == expect)
 *		*dst = src
 *
 * fcmpset:
 *	if (*dst == *expect)
 *		*dst = src
 *	else
 *		*expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
#define	ATOMIC_CMPSET(TYPE)				\
static __inline int					\
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	" lock; cmpxchg %3,%1 ;	"			\
	"# atomic_cmpset_" #TYPE "	"		\
	: "=@cce" (res),		/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (expect)			/* 2 */		\
	: "r" (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}							\
							\
static __inline int					\
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	" lock; cmpxchg %3,%1 ;		"		\
	"# atomic_fcmpset_" #TYPE "	"		\
	: "=@cce" (res),		/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (*expect)		/* 2 */		\
	: "r" (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}

ATOMIC_CMPSET(char);
ATOMIC_CMPSET(short);
ATOMIC_CMPSET(int);
ATOMIC_CMPSET(long);
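
/*
 * A typical fcmpset update loop; on failure the primitive refreshes
 * *expect with the current value of *dst, so no explicit reload is
 * needed.  A minimal sketch (compute() is hypothetical):
 *
 *	u_int old, new;
 *
 *	old = *p;
 *	do {
 *		new = compute(old);
 *	} while (atomic_fcmpset_int(p, &old, new) == 0);
 */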

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	" lock; xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

/*
 * Atomically add the value of v to the long integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	" lock;	xaddq	%0,%1 ;		"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
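
/*
 * Because fetchadd returns the pre-increment value, unique values can
 * be handed out without a retry loop.  A minimal sketch (next_ticket
 * is hypothetical):
 *
 *	static volatile u_int next_ticket;
 *	u_int my_ticket;
 *
 *	my_ticket = atomic_fetchadd_int(&next_ticket, 1);
 */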

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" lock;	btsl	%2,%1 ;		"
	"# atomic_testandset_int"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" lock;	btsq	%2,%1 ;		"
	"# atomic_testandset_long"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" lock;	btrl	%2,%1 ;		"
	"# atomic_testandclear_int"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" lock;	btrq	%2,%1 ;		"
	"# atomic_testandclear_long"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}
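
/*
 * testandset/testandclear return the bit's previous value, which makes
 * "claim this slot exactly once" idioms straightforward.  A minimal
 * sketch (busymap and slot are hypothetical):
 *
 *	static volatile u_long busymap;
 *
 *	if (atomic_testandset_long(&busymap, slot) == 0) {
 *		slot was free and is now claimed by us
 *	}
 */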

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  To avoid false data dependencies, we use a
 * special address for "mem".  In the kernel, we use a private per-cpu
 * cache line.  In user space, we use a word in the stack's red zone
 * (-8(%rsp)).
 */

static __inline void
__storeload_barrier(void)
{
#if defined(_KERNEL)
	__asm __volatile("lock; addl $0,%%gs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
#else /* !_KERNEL */
	__asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
#endif /* _KERNEL */
}

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}
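
/*
 * Only the seq_cst fence emits an instruction; the acq, rel, and
 * acq_rel fences compile to a bare compiler barrier because x86 TSO
 * already provides those orderings in hardware.  The classic case that
 * needs the Store/Load barrier is a Dekker-style handshake.  A minimal
 * sketch (flag0/flag1 and enter() are hypothetical):
 *
 *	CPU 0:
 *		flag0 = 1;
 *		atomic_thread_fence_seq_cst();
 *		if (flag1 == 0)
 *			enter();
 *
 *	CPU 1 runs the same code with flag0 and flag1 exchanged.
 *
 * Without the fence, each CPU's store may be delayed past its own load
 * and both sides could enter the critical region.
 */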

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "er",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "er", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "er",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "er",  v);

#define	ATOMIC_LOADSTORE(TYPE)					\
	ATOMIC_LOAD(TYPE);					\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE
#ifndef WANT_FUNCTIONS

/* Read the current value and store a new value in the destination. */
static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_swap_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}
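
/*
 * atomic_swap_*() returns the previous value while installing the new
 * one; atomic_readandclear_*() below is simply a swap with zero.  A
 * minimal sketch that drains a word of pending event bits ("pending"
 * and handle() are hypothetical):
 *
 *	static volatile u_int pending;
 *
 *	handle(atomic_readandclear_int(&pending));
 */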

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char
#define	atomic_cmpset_acq_char		atomic_cmpset_char
#define	atomic_cmpset_rel_char		atomic_cmpset_char
#define	atomic_fcmpset_acq_char		atomic_fcmpset_char
#define	atomic_fcmpset_rel_char		atomic_fcmpset_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short
#define	atomic_cmpset_acq_short		atomic_cmpset_short
#define	atomic_cmpset_rel_short		atomic_cmpset_short
#define	atomic_fcmpset_acq_short	atomic_fcmpset_short
#define	atomic_fcmpset_rel_short	atomic_fcmpset_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
#define	atomic_testandset_acq_long	atomic_testandset_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char
#define	atomic_cmpset_8		atomic_cmpset_char
#define	atomic_cmpset_acq_8	atomic_cmpset_acq_char
#define	atomic_cmpset_rel_8	atomic_cmpset_rel_char
#define	atomic_fcmpset_8	atomic_fcmpset_char
#define	atomic_fcmpset_acq_8	atomic_fcmpset_acq_char
#define	atomic_fcmpset_rel_8	atomic_fcmpset_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short
#define	atomic_cmpset_16	atomic_cmpset_short
#define	atomic_cmpset_acq_16	atomic_cmpset_acq_short
#define	atomic_cmpset_rel_16	atomic_cmpset_rel_short
#define	atomic_fcmpset_16	atomic_fcmpset_short
#define	atomic_fcmpset_acq_16	atomic_fcmpset_acq_short
#define	atomic_fcmpset_rel_16	atomic_fcmpset_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_fcmpset_64	atomic_fcmpset_long
#define	atomic_fcmpset_acq_64	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_64	atomic_fcmpset_rel_long
#define	atomic_swap_64		atomic_swap_long
#define	atomic_readandclear_64	atomic_readandclear_long
#define	atomic_fetchadd_64	atomic_fetchadd_long
#define	atomic_testandset_64	atomic_testandset_long
#define	atomic_testandclear_64	atomic_testandclear_long

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_fcmpset_ptr	atomic_fcmpset_long
#define	atomic_fcmpset_acq_ptr	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_ptr	atomic_fcmpset_rel_long
#define	atomic_swap_ptr		atomic_swap_long
#define	atomic_readandclear_ptr	atomic_readandclear_long
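
/*
 * The _ptr forms make lock-free pointer updates possible.  A purely
 * illustrative sketch of pushing a node n onto a singly linked list
 * head (struct node and head are hypothetical; a naive CAS list like
 * this has the usual ABA caveats on pop):
 *
 *	struct node { struct node *next; };
 *	static struct node *volatile head;
 *	u_long old;
 *
 *	old = (u_long)head;
 *	do {
 *		n->next = (struct node *)old;
 *	} while (atomic_fcmpset_rel_ptr((volatile u_long *)&head,
 *	    &old, (u_long)n) == 0);
 */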

#endif /* !WANT_FUNCTIONS */

#endif /* !SAN_NEEDS_INTERCEPTORS || SAN_RUNTIME */

#endif /* !_MACHINE_ATOMIC_H_ */

#endif /* __i386__ */