/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#ifndef __OFFSETOF_MONITORBUF
/*
 * __OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define	__OFFSETOF_MONITORBUF	0x80

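/*
 * __mbk() (kernel) and __mbu() (userland) implement the Store/Load
 * barrier described in the memory ordering comment further below: a
 * locked add of zero to a private per-CPU cache line or to the word at
 * the top of the stack, respectively.
 */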
static __inline void
__mbk(void)
{

	__asm __volatile("lock; addl $0,%%fs:%0"
	    : "+m" (*(u_int *)__OFFSETOF_MONITORBUF) : : "memory", "cc");
}

static __inline void
__mbu(void)
{

	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
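
/*
 * Illustrative usage sketch (not part of this header; "foo_flags" and
 * "foo_count" are hypothetical):
 *
 *	static volatile u_int foo_flags, foo_count;
 *
 *	atomic_set_int(&foo_flags, 0x01);	// foo_flags |= 0x01, atomically
 *	atomic_clear_int(&foo_flags, 0x01);	// foo_flags &= ~0x01, atomically
 *	atomic_add_int(&foo_count, 1);		// foo_count += 1, atomically
 *	atomic_subtract_int(&foo_count, 1);	// foo_count -= 1, atomically
 */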

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src);
int	atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src);
int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src);
int	atomic_fcmpset_short(volatile u_short *dst, u_short *expect,
	    u_short src);
int	atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64(volatile uint64_t *);
void		atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_fetchadd_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The asm statements are marked volatile so that the compiler cannot
 * discard them as dead code.  GCC also aggressively reorders memory
 * operations, so the barrier variants need a "memory" clobber to keep
 * accesses from being moved across them.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack
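
/*
 * For example, ATOMIC_ASM(set, int, "orl %1,%0", "ir", v), instantiated
 * near the end of this file, expands to roughly:
 *
 *	static __inline void
 *	atomic_set_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile(MPLOCKED "orl %1,%0"
 *		: "+m" (*p)
 *		: "ir" (v)
 *		: "cc");
 *	}
 *
 * plus an atomic_set_barr_int() variant whose extra "memory" clobber
 * keeps the compiler from moving other memory accesses across it.
 */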

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *	if (*dst == expect)
 *		*dst = src
 *
 * fcmpset:
 *	if (*dst == *expect)
 *		*dst = src
 *	else
 *		*expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
#define	ATOMIC_CMPSET(TYPE, CONS)			\
static __inline int					\
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg	%3,%1 ;		"		\
	"	sete	%0 ;		"		\
	"# atomic_cmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (expect)			/* 2 */		\
	: CONS (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}							\
							\
static __inline int					\
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg	%3,%1 ;		"		\
	"	sete	%0 ;		"		\
	"# atomic_fcmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (*expect)		/* 2 */		\
	: CONS (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}

ATOMIC_CMPSET(char, "q");
ATOMIC_CMPSET(short, "r");
ATOMIC_CMPSET(int, "r");
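
/*
 * Illustrative usage sketch (not part of this header; "foo_update_max"
 * is hypothetical): a compare-and-swap loop built on
 * atomic_fcmpset_int().  On failure, fcmpset refreshes "old" with the
 * current value of *p, so the loop does not have to re-read it by hand
 * as it would with atomic_cmpset_int().
 *
 *	static __inline void
 *	foo_update_max(volatile u_int *p, u_int v)
 *	{
 *		u_int old;
 *
 *		old = *p;
 *		while (old < v && !atomic_fcmpset_int(p, &old, v))
 *			;	// "old" now holds the fresh value; retry
 *	}
 */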

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
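
/*
 * Illustrative usage sketch (not part of this header; "foo_idgen" is
 * hypothetical): because each caller of atomic_fetchadd_int() observes
 * a distinct previous value, it can hand out unique, monotonically
 * increasing identifiers.
 *
 *	static volatile u_int foo_idgen;
 *
 *	static __inline u_int
 *	foo_alloc_id(void)
 *	{
 *		return (atomic_fetchadd_int(&foo_idgen, 1));
 *	}
 */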

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}
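
/*
 * Illustrative usage sketch (not part of this header; the "foo_" names
 * are hypothetical): atomic_testandset_int() returns the previous state
 * of the bit, which makes it a natural "try to claim" primitive.  A
 * real lock would additionally need acquire/release ordering (see the
 * discussion below).
 *
 *	static volatile u_int foo_busy;
 *
 *	// Non-zero if we claimed bit 0, zero if it was already set.
 *	static __inline int
 *	foo_try_claim(void)
 *	{
 *		return (atomic_testandset_int(&foo_busy, 0) == 0);
 *	}
 */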

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  In the kernel, we use a private per-cpu
 * cache line for "mem", to avoid introducing false data
 * dependencies.  In user space, we use the word at the top of the
 * stack.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */
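
/*
 * Illustrative usage sketch (not part of this header; the names are
 * hypothetical): a producer publishes data with a release store and a
 * consumer picks it up with an acquire load.  The write to foo_data
 * cannot be reordered after the release store, and the read of foo_data
 * cannot be hoisted before the acquire load.
 *
 *	static volatile u_int foo_ready;
 *	static u_int foo_data;
 *
 *	// producer
 *	foo_data = compute();
 *	atomic_store_rel_int(&foo_ready, 1);
 *
 *	// consumer
 *	if (atomic_load_acq_int(&foo_ready) != 0)
 *		use(foo_data);
 *
 * atomic_thread_fence_seq_cst() additionally orders earlier stores
 * before later loads (the Store/Load case described above), using the
 * "lock addl" barrier.
 */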

#if defined(_KERNEL)
#if defined(SMP)
#define	__storeload_barrier()	__mbk()
#else /* _KERNEL && UP */
#define	__storeload_barrier()	__compiler_membar()
#endif /* SMP */
#else /* !_KERNEL */
#define	__storeload_barrier()	__mbu()
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*q),			/* 1 */
	  "m" (*(q + 1))		/* 2 */
	: "memory");
	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*q),			/* 0 */
	  "=m" (*(q + 1))		/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%4,%2 ;		"
	"	movl	%3,%1 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchg8b %1 ;		"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}

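/*
 * The i586 64-bit load, store and swap below rely on cmpxchg8b for
 * their atomic 8-byte memory access.  For the load, %edx:%eax and
 * %ecx:%ebx are made equal, so whether or not the compare succeeds the
 * memory operand is left unchanged and its old value ends up in
 * %edx:%eax.  For the store and swap, cmpxchg8b is retried in a loop
 * until the comparand matches and the new value is written.
 */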
static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");
	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
	return (v);
}

static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}

static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
{

	for (;;) {
		uint64_t t = *p;
		if (atomic_cmpset_64(p, t, t + v))
			return (t);
	}
}
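
/*
 * Illustrative usage sketch (not part of this header; "foo_bytes" and
 * "len" are hypothetical): a 64-bit counter that stays atomic on i386,
 * where a plain 64-bit "+=" or read would otherwise be two separate
 * 32-bit accesses.
 *
 *	static volatile uint64_t foo_bytes;
 *
 *	(void)atomic_fetchadd_64(&foo_bytes, (uint64_t)len);
 *	uint64_t snapshot = atomic_load_acq_64(&foo_bytes);
 */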

#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{

	return (atomic_testandclear_int((volatile u_int *)p, v));
}

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */
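
/*
 * Illustrative usage sketch (not part of this header; "foo_pending" is
 * hypothetical): swapping in zero (also available below as
 * atomic_readandclear_int()) drains a pending-event mask so that each
 * event bit is observed exactly once.
 *
 *	static volatile u_int foo_pending;
 *
 *	u_int events = atomic_swap_int(&foo_pending, 0);
 *	// handle the bits set in "events"
 */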

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char
#define	atomic_cmpset_acq_char		atomic_cmpset_char
#define	atomic_cmpset_rel_char		atomic_cmpset_char
#define	atomic_fcmpset_acq_char		atomic_fcmpset_char
#define	atomic_fcmpset_rel_char		atomic_fcmpset_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short
#define	atomic_cmpset_acq_short		atomic_cmpset_short
#define	atomic_cmpset_rel_short		atomic_cmpset_short
#define	atomic_fcmpset_acq_short	atomic_fcmpset_short
#define	atomic_fcmpset_rel_short	atomic_fcmpset_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char
#define	atomic_cmpset_8		atomic_cmpset_char
#define	atomic_cmpset_acq_8	atomic_cmpset_acq_char
#define	atomic_cmpset_rel_8	atomic_cmpset_rel_char
#define	atomic_fcmpset_8	atomic_fcmpset_char
#define	atomic_fcmpset_acq_8	atomic_fcmpset_acq_char
#define	atomic_fcmpset_rel_8	atomic_fcmpset_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short
#define	atomic_cmpset_16	atomic_cmpset_short
#define	atomic_cmpset_acq_16	atomic_cmpset_acq_short
#define	atomic_cmpset_rel_16	atomic_cmpset_rel_short
#define	atomic_fcmpset_16	atomic_fcmpset_short
#define	atomic_fcmpset_acq_16	atomic_fcmpset_acq_short
#define	atomic_fcmpset_rel_16	atomic_fcmpset_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_ptr(dst, old, new) \
	atomic_fcmpset_int((volatile u_int *)(dst), (u_int *)(old), (u_int)(new))
#define	atomic_fcmpset_acq_ptr(dst, old, new) \
	atomic_fcmpset_acq_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_rel_ptr(dst, old, new) \
	atomic_fcmpset_rel_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
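
/*
 * Illustrative usage sketch (not part of this header; "struct foo" and
 * "foo_head" are hypothetical): pushing onto a singly-linked list with
 * atomic_fcmpset_ptr(), retrying whenever the head changed between
 * reading it and linking in the new element.
 *
 *	struct foo { struct foo *f_next; };
 *	static struct foo *foo_head;
 *
 *	static __inline void
 *	foo_push(struct foo *f)
 *	{
 *		uintptr_t old;
 *
 *		old = (uintptr_t)foo_head;
 *		do {
 *			f->f_next = (struct foo *)old;
 *		} while (!atomic_fcmpset_ptr((volatile uintptr_t *)&foo_head,
 *		    &old, (uintptr_t)f));
 *	}
 */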

#endif /* !WANT_FUNCTIONS */

#if defined(_KERNEL)
#define	mb()	__mbk()
#define	wmb()	__mbk()
#define	rmb()	__mbk()
#else
#define	mb()	__mbu()
#define	wmb()	__mbu()
#define	rmb()	__mbu()
#endif

#endif /* !_MACHINE_ATOMIC_H_ */