/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#ifndef __OFFSETOF_MONITORBUF
/*
 * __OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define	__OFFSETOF_MONITORBUF	0x80

static __inline void
__mbk(void)
{

	__asm __volatile("lock; addl $0,%%fs:%0"
	    : "+m" (*(u_int *)__OFFSETOF_MONITORBUF) : : "memory", "cc");
}

static __inline void
__mbu(void)
{

	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
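
/*
 * Illustrative sketch (hypothetical consumer code, not part of this
 * header) of the operations above applied to a shared flags word:
 *
 *	static volatile u_int flags;
 *	u_int old;
 *
 *	atomic_set_int(&flags, 0x4);		(flags |= 0x4, atomically)
 *	atomic_clear_int(&flags, 0x4);		(flags &= ~0x4, atomically)
 *	old = atomic_swap_int(&flags, 0);	(fetch old value, store 0)
 */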

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src);
int	atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src);
int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src);
int	atomic_fcmpset_short(volatile u_short *dst, u_short *expect,
	    u_short src);
int	atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64(volatile uint64_t *);
void		atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_fetchadd_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly statements are marked volatile so that the compiler
 * cannot discard them, and the memory-barrier variants also clobber
 * memory, since GCC otherwise reorders operations aggressively and
 * would defeat the barriers.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *	if (*dst == expect)
 *		*dst = src
 *
 * fcmpset:
 *	if (*dst == *expect)
 *		*dst = src
 *	else
 *		*expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
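
/*
 * Illustrative sketch (hypothetical function, not part of this
 * header): the canonical fcmpset retry loop.  Because a failed
 * fcmpset writes the current value back into "old", the loop needs
 * no extra load on each retry.
 *
 *	static __inline u_int
 *	atomic_add_capped_int(volatile u_int *p, u_int delta, u_int cap)
 *	{
 *		u_int old, new;
 *
 *		old = *p;
 *		do {
 *			new = old + delta;
 *			if (new > cap)
 *				new = cap;
 *		} while (atomic_fcmpset_int(p, &old, new) == 0);
 *		return (old);
 *	}
 */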
#define	ATOMIC_CMPSET(TYPE, CONS)			\
static __inline int					\
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg	%3,%1 ;		"		\
	"	sete	%0 ;		"		\
	"# atomic_cmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (expect)			/* 2 */		\
	: CONS (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}							\
							\
static __inline int					\
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg	%3,%1 ;		"		\
	"	sete	%0 ;		"		\
	"# atomic_fcmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (*expect)		/* 2 */		\
	: CONS (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}

ATOMIC_CMPSET(char, "q");
ATOMIC_CMPSET(short, "r");
ATOMIC_CMPSET(int, "r");

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
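
/*
 * Illustrative sketch (hypothetical names): atomic_fetchadd_int() as
 * a ticket dispenser; racing callers each observe a distinct
 * pre-increment value.
 *
 *	static u_int next_id;
 *
 *	static __inline u_int
 *	alloc_id(void)
 *	{
 *
 *		return (atomic_fetchadd_int(&next_id, 1));
 *	}
 */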

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, rather than mfence.  In the kernel, "mem" is a private
 * per-CPU cache line, to avoid introducing false data dependencies.
 * In user space, it is the word at the top of the stack.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */
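
/*
 * Sketch of the reordering the Store/Load barrier forbids
 * (hypothetical flags, both initially zero):
 *
 *	CPU 1				CPU 2
 *	atomic_store_rel_int(&f1, 1);	atomic_store_rel_int(&f2, 1);
 *	atomic_thread_fence_seq_cst();	atomic_thread_fence_seq_cst();
 *	r1 = atomic_load_acq_int(&f2);	r2 = atomic_load_acq_int(&f1);
 *
 * Without the fences, each CPU may satisfy its load before its own
 * store becomes visible, allowing r1 == 0 && r2 == 0.  With them, at
 * least one CPU observes the other's store.
 */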

#if defined(_KERNEL)
#if defined(SMP)
#define	__storeload_barrier()	__mbk()
#else /* _KERNEL && UP */
#define	__storeload_barrier()	__compiler_membar()
#endif /* SMP */
#else /* !_KERNEL */
#define	__storeload_barrier()	__mbu()
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack
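
/*
 * Sketch of the intended acquire/release pairing (hypothetical
 * producer/consumer; "data" is ordinary shared memory and "ready" is
 * initially zero):
 *
 *	Producer:			Consumer:
 *	data = compute();		while (atomic_load_acq_int(&ready) == 0)
 *	atomic_store_rel_int(&ready, 1);	cpu_spinwait();
 *					use(data);
 *
 * The release store keeps the write of "data" visible before "ready"
 * becomes non-zero; the acquire load keeps the read of "data" after
 * the load of "ready", so the consumer cannot see stale data.
 */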

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/*
 * The 80486 lacks CMPXCHG8B and is never used in SMP configurations,
 * so the _i386 variants below make their 64-bit accesses atomic by
 * disabling interrupts around plain 32-bit moves.
 */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*q),			/* 1 */
	  "m" (*(q + 1))		/* 2 */
	: "memory");
	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*q),			/* 0 */
	  "=m" (*(q + 1))		/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%4,%2 ;		"
	"	movl	%3,%1 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchg8b %1 ;		"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	/*
	 * Seed %edx:%eax and %ecx:%ebx with the same value.  cmpxchg8b
	 * then either fails and fetches the current *p into %edx:%eax,
	 * or succeeds because *p already held that value and rewrites
	 * it unchanged; both cases yield an atomic 64-bit load.
	 */
	__asm __volatile(
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");
	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	/*
	 * Retry cmpxchg8b until it succeeds; each failure reloads the
	 * current value of *p into %edx:%eax for the next attempt.
	 */
	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	/*
	 * Same loop as the store; on success %edx:%eax holds the value
	 * that was replaced, which is returned in v.
	 */
	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
	return (v);
}

/*
 * The wrappers below select the i386 or i586 implementation at run
 * time, keyed on whether the CPU advertises CMPXCHG8B.
 */
static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}

/*
 * i386 has no 64-bit XADD, so fetchadd is emulated with a cmpset
 * loop.
 */
static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
{

	for (;;) {
		uint64_t t = *p;
		if (atomic_cmpset_64(p, t, t + v))
			return (t);
	}
}

#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{

	return (atomic_testandclear_int((volatile u_int *)p, v));
}

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */
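
/*
 * Illustrative sketch (hypothetical, not an API provided here): a
 * minimal test-and-set spinlock on top of atomic_swap_int().  The
 * xchg underlying the swap carries an implicit lock prefix, so
 * acquisition needs no extra barrier; production locks should also
 * spin read-only and use cpu_spinwait() before retrying.
 *
 *	static __inline void
 *	toy_lock(volatile u_int *l)
 *	{
 *
 *		while (atomic_swap_int(l, 1) != 0)
 *			continue;
 *	}
 *
 *	static __inline void
 *	toy_unlock(volatile u_int *l)
 *	{
 *
 *		atomic_store_rel_int(l, 0);
 *	}
 */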

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char
#define	atomic_cmpset_acq_char		atomic_cmpset_char
#define	atomic_cmpset_rel_char		atomic_cmpset_char
#define	atomic_fcmpset_acq_char		atomic_fcmpset_char
#define	atomic_fcmpset_rel_char		atomic_fcmpset_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short
#define	atomic_cmpset_acq_short		atomic_cmpset_short
#define	atomic_cmpset_rel_short		atomic_cmpset_short
#define	atomic_fcmpset_acq_short	atomic_fcmpset_short
#define	atomic_fcmpset_rel_short	atomic_fcmpset_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char
#define	atomic_cmpset_8		atomic_cmpset_char
#define	atomic_cmpset_acq_8	atomic_cmpset_acq_char
#define	atomic_cmpset_rel_8	atomic_cmpset_rel_char
#define	atomic_fcmpset_8	atomic_fcmpset_char
#define	atomic_fcmpset_acq_8	atomic_fcmpset_acq_char
#define	atomic_fcmpset_rel_8	atomic_fcmpset_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short
#define	atomic_cmpset_16	atomic_cmpset_short
#define	atomic_cmpset_acq_16	atomic_cmpset_acq_short
#define	atomic_cmpset_rel_16	atomic_cmpset_rel_short
#define	atomic_fcmpset_16	atomic_fcmpset_short
#define	atomic_fcmpset_acq_16	atomic_fcmpset_acq_short
#define	atomic_fcmpset_rel_16	atomic_fcmpset_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_ptr(dst, old, new) \
	atomic_fcmpset_int((volatile u_int *)(dst), (u_int *)(old), (u_int)(new))
#define	atomic_fcmpset_acq_ptr(dst, old, new) \
	atomic_fcmpset_acq_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_rel_ptr(dst, old, new) \
	atomic_fcmpset_rel_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
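
/*
 * Illustrative sketch (hypothetical types, not part of this header):
 * pushing onto a lock-free LIFO with the pointer operations.  Note
 * that the mirror-image pop is exposed to the classic ABA problem
 * and needs extra machinery such as generation counts.
 *
 *	struct node { struct node *next; };
 *	static struct node *head;
 *
 *	static __inline void
 *	push(struct node *n)
 *	{
 *		struct node *h;
 *
 *		do {
 *			h = (struct node *)atomic_load_acq_ptr(&head);
 *			n->next = h;
 *		} while (atomic_cmpset_rel_ptr(&head, h, n) == 0);
 *	}
 */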

#endif /* !WANT_FUNCTIONS */

#if defined(_KERNEL)
#define	mb()	__mbk()
#define	wmb()	__mbk()
#define	rmb()	__mbk()
#else
#define	mb()	__mbu()
#define	wmb()	__mbu()
#define	rmb()	__mbu()
#endif

#endif /* !_MACHINE_ATOMIC_H_ */