xref: /freebsd/sys/i386/include/atomic.h (revision 48cae112b516ce625d38f22fdc07a29d509de845)
/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#ifndef __OFFSETOF_MONITORBUF
/*
 * __OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define	__OFFSETOF_MONITORBUF	0x180

static __inline void
__mbk(void)
{

	__asm __volatile("lock; addl $0,%%fs:%0"
	    : "+m" (*(u_int *)__OFFSETOF_MONITORBUF) : : "memory", "cc");
}

static __inline void
__mbu(void)
{

	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
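
/*
 * Illustrative sketch, not part of the original header: how callers
 * typically use the operations listed above.  The variable and function
 * names are hypothetical, and the block is guarded out so it is never
 * compiled.
 */
#if 0
static volatile u_int example_flags;

static void
example_flag_usage(void)
{

	atomic_set_int(&example_flags, 0x01);		/* flags |= 0x01 */
	atomic_clear_int(&example_flags, 0x01);		/* flags &= ~0x01 */
	atomic_add_int(&example_flags, 2);		/* flags += 2 */
	atomic_subtract_int(&example_flags, 2);		/* flags -= 2 */
}
#endif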

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64(volatile uint64_t *);
void		atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly statements are marked volatile to keep the compiler from
 * removing them as dead code.  GCC aggressively reorders operations, so
 * the barrier variants also need a "memory" clobber to prevent such
 * reordering across the memory barriers.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

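/*
 * For illustration (not in the original source): the invocation
 * ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) near the end of this
 * file expands to roughly
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile(MPLOCKED "addl %1,%0"
 *		: "+m" (*p) : "ir" (v) : "cc");
 *	}
 *
 * plus a _barr_ variant that adds a "memory" clobber.  The trailing
 * "struct __hack" exists only to consume the semicolon at the macro's
 * invocation site.
 */
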
/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == expect) *dst = src (all 32-bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%1 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"       sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (expect)			/* 3 */
	: "memory");
	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"       sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */
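
/*
 * Illustrative sketch, not from the original source: the usual
 * compare-and-set retry loop, here raising a hypothetical shared
 * maximum.  The loop re-reads *p and retries whenever another CPU
 * changed the value between the load and the cmpset.  Guarded out so
 * it is never compiled.
 */
#if 0
static void
example_update_max(volatile u_int *p, u_int v)
{
	u_int old;

	do {
		old = *p;
		if (old >= v)
			break;		/* nothing to do */
	} while (atomic_cmpset_int(p, old, v) == 0);
}
#endif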

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
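
/*
 * Illustrative sketch, not from the original source: because
 * atomic_fetchadd_int() returns the previous value, it can hand out
 * unique ticket numbers without a retry loop.  Names are hypothetical;
 * the block is never compiled.
 */
#if 0
static volatile u_int example_ticket;

static u_int
example_next_ticket(void)
{

	/* Each caller gets a distinct value, even under contention. */
	return (atomic_fetchadd_int(&example_ticket, 1));
}
#endif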

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}
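
/*
 * Illustrative sketch, not from the original source:
 * atomic_testandset_int() returns the bit's previous value, so exactly
 * one caller observes 0, which makes a simple run-once gate.
 * Hypothetical names; never compiled.
 */
#if 0
static volatile u_int example_once;

static void
example_do_once(void (*fn)(void))
{

	/* Only the first caller to set bit 0 runs fn(). */
	if (atomic_testandset_int(&example_once, 0) == 0)
		fn();
}
#endif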

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  In the kernel, we use a private per-cpu
 * cache line as the target for the locked addition, to avoid
 * introducing false data dependencies.  In userspace, a word at the
 * top of the stack is used.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */

#if defined(_KERNEL)
#if defined(SMP)
#define	__storeload_barrier()	__mbk()
#else /* _KERNEL && UP */
#define	__storeload_barrier()	__compiler_membar()
#endif /* SMP */
#else /* !_KERNEL */
#define	__storeload_barrier()	__mbu()
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}
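
/*
 * Illustrative sketch, not from the original source: the Store/Load
 * case the comment above describes.  Each thread stores its own flag
 * and then loads the other's; without the sequentially consistent
 * fence, the load could pass the earlier store to a different address
 * and both threads could observe 0.  Hypothetical names; never
 * compiled.
 */
#if 0
static volatile u_int example_flag0, example_flag1;

static int
example_thread0_enter(void)
{

	example_flag0 = 1;
	atomic_thread_fence_seq_cst();	/* Store/Load barrier */
	return (example_flag1 == 0);	/* safe to proceed? */
}
#endif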

#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*q),			/* 1 */
	  "m" (*(q + 1))		/* 2 */
	: "memory");
	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*q),			/* 0 */
	  "=m" (*(q + 1))		/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%4,%2 ;		"
	"	movl	%3,%1 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchg8b %1 ;		"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");
	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
	return (v);
}

static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}
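
/*
 * Illustrative sketch, not from the original source: callers use the
 * dispatching wrappers above and never pick an _i386/_i586 variant
 * themselves; the CPUID_CX8 test selects cmpxchg8b when the CPU has
 * it.  Hypothetical names; never compiled.
 */
#if 0
static volatile uint64_t example_counter64;

static void
example_publish_64(uint64_t v)
{

	/* Atomic even though the value is wider than a register. */
	atomic_store_rel_64(&example_counter64, v);
}
#endif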

#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
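
/*
 * Illustrative sketch, not from the original source:
 * atomic_readandclear_int() is just a swap with zero, which makes it
 * handy for draining a bitmask of pending events in one atomic step.
 * Hypothetical names; never compiled.
 */
#if 0
static volatile u_int example_pending;

static u_int
example_drain_events(void)
{

	/* Returns the pending set and leaves it empty, atomically. */
	return (atomic_readandclear_int(&example_pending));
}
#endif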

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
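
/*
 * Illustrative sketch, not from the original source: a lock-free
 * stack push using atomic_cmpset_ptr(), a classic use of the
 * pointer-sized compare-and-set.  (A matching pop would need ABA
 * protection.)  Types and names are hypothetical; never compiled.
 */
#if 0
struct example_node {
	struct example_node *next;
};

static struct example_node *volatile example_top;

static void
example_push(struct example_node *n)
{

	do {
		n->next = example_top;
	} while (atomic_cmpset_ptr(&example_top, n->next, n) == 0);
}
#endif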

#endif /* !WANT_FUNCTIONS */

#if defined(_KERNEL)
#define	mb()	__mbk()
#define	wmb()	__mbk()
#define	rmb()	__mbk()
#else
#define	mb()	__mbu()
#define	wmb()	__mbu()
#define	rmb()	__mbu()
#endif

#endif /* !_MACHINE_ATOMIC_H_ */