/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#include <sys/atomic_common.h>

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#ifndef __OFFSETOF_MONITORBUF
/*
 * __OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define	__OFFSETOF_MONITORBUF	0x80

static __inline void
__mbk(void)
{

	__asm __volatile("lock; addl $0,%%fs:%0"
	    : "+m" (*(u_int *)__OFFSETOF_MONITORBUF) : : "memory", "cc");
}

static __inline void
__mbu(void)
{

	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
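
/*
 * Illustrative sketch, not part of this header: typical callers use
 * these primitives on shared counters and flag words.  The variables
 * and the FLAG_BUSY bit below are made up for the example.
 *
 *	static volatile u_int stats;
 *	static volatile u_int flags;
 *	u_int old;
 *
 *	atomic_add_int(&stats, 1);		stats += 1, atomically
 *	atomic_set_int(&flags, FLAG_BUSY);	flags |= FLAG_BUSY
 *	atomic_clear_int(&flags, FLAG_BUSY);	flags &= ~FLAG_BUSY
 *	old = atomic_readandclear_int(&stats);	fetch old value, store 0
 */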

/*
 * Always use lock prefixes.  The result is slightly less optimal for
 * UP systems, but it matters less now, and sometimes UP is emulated
 * over SMP.
 *
 * The assembly is volatilized to prevent the compiler from discarding
 * it as dead code.  GCC aggressively reorders operations, so a memory
 * clobber is also necessary to keep the memory barriers effective.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("lock; " OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("lock; " OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack
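
/*
 * For illustration only: the instantiation ATOMIC_ASM(add, int,
 * "addl %1,%0", "ir", v) found below expands to roughly the following
 * pair of functions; the _barr_ variant differs only in the "memory"
 * clobber, which also makes it a compiler barrier.
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock; addl %1,%0"
 *		: "+m" (*p)
 *		: "ir" (v)
 *		: "cc");
 *	}
 */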

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *	if (*dst == expect)
 *		*dst = src
 *
 * fcmpset:
 *	if (*dst == *expect)
 *		*dst = src
 *	else
 *		*expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
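
/*
 * Illustrative sketch, not part of this header: a compare-and-swap
 * retry loop.  With cmpset the caller must re-read *dst after a
 * failure; with fcmpset the failed comparison has already stored the
 * current value into "old", saving one load per retry.  "counterp" is
 * a hypothetical volatile u_int pointer used only for this example.
 *
 *	u_int old, new;
 *
 *	old = *counterp;
 *	do {
 *		new = old + 1;
 *	} while (atomic_fcmpset_int(counterp, &old, new) == 0);
 */
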
#define	ATOMIC_CMPSET(TYPE, CONS)			\
static __inline int					\
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	lock; cmpxchg	%3,%1 ;	"		\
	"	sete	%0 ;		"		\
	"# atomic_cmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (expect)			/* 2 */		\
	: CONS (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}							\
							\
static __inline int					\
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	lock; cmpxchg	%3,%1 ;	"		\
	"	sete	%0 ;		"		\
	"# atomic_fcmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (*expect)		/* 2 */		\
	: CONS (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}

ATOMIC_CMPSET(char, "q");
ATOMIC_CMPSET(short, "r");
ATOMIC_CMPSET(int, "r");

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	lock; xaddl	%0,%1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
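
/*
 * Illustrative sketch, not part of this header: atomic_fetchadd_int()
 * returns the value the word held before the addition, which is what a
 * reference-count release wants to examine.  "refcount" and
 * free_the_object() are made up for the example.
 *
 *	if (atomic_fetchadd_int(&refcount, -1) == 1)
 *		free_the_object();
 */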

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	lock; btsl	%2,%1 ;	"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	lock; btrl	%2,%1 ;	"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}
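
/*
 * Illustrative sketch, not part of this header: the test-and-set and
 * test-and-clear primitives act on a single bit, selected by the bit
 * index modulo 32, and return that bit's previous value.  "flagword"
 * and BUSY_BIT are made up for the example.
 *
 *	if (atomic_testandset_int(&flagword, BUSY_BIT) == 0) {
 *		the bit was clear and this caller is the one who set it
 *	}
 *	...
 *	atomic_testandclear_int(&flagword, BUSY_BIT);
 */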

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  In the kernel, we use a private per-cpu
 * cache line for "mem", to avoid introducing false data
 * dependencies.  In user space, we use the word at the top of the
 * stack.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */
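
/*
 * Illustrative sketch, not part of this header: a release store paired
 * with an acquire load hands data from one CPU to another without any
 * explicit fence.  "data" and "ready" are made-up shared variables.
 *
 *	producer (CPU 0):
 *		data = 42;
 *		atomic_store_rel_int(&ready, 1);
 *
 *	consumer (CPU 1):
 *		while (atomic_load_acq_int(&ready) == 0)
 *			continue;
 *		consume(data);
 *
 * The release store keeps the write of "data" visible before the write
 * of "ready"; the acquire load keeps the read of "data" after the read
 * of "ready".
 */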

#if defined(_KERNEL)
#define	__storeload_barrier()	__mbk()
#else /* !_KERNEL */
#define	__storeload_barrier()	__mbu()
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}
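
/*
 * Illustrative sketch, not part of this header: acquire and release do
 * not order a store against a later load of a different location, so
 * flag-based mutual exclusion (Dekker/Peterson style) needs the
 * sequentially consistent fence.  "flag0" and "flag1" are made-up
 * shared variables.
 *
 *	CPU 0:
 *		flag0 = 1;
 *		atomic_thread_fence_seq_cst();
 *		if (flag1 == 0)
 *			enter the critical section
 *
 * CPU 1 does the same with the roles of flag0 and flag1 swapped.
 * Without the fence, the load of the other CPU's flag could pass the
 * preceding store and both CPUs could observe 0 at the same time.
 */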

#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}

static __inline int
atomic_fcmpset_64_i386(volatile uint64_t *dst, uint64_t *expect, uint64_t src)
{

	if (atomic_cmpset_64_i386(dst, *expect, src)) {
		return (1);
	} else {
		*expect = *dst;
		return (0);
	}
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*q),			/* 1 */
	  "m" (*(q + 1))		/* 2 */
	: "memory");
	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*q),			/* 0 */
	  "=m" (*(q + 1))		/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%4,%2 ;		"
	"	movl	%3,%1 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	lock; cmpxchg8b %1 ;	"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}

static __inline int
atomic_fcmpset_64_i586(volatile uint64_t *dst, uint64_t *expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	lock; cmpxchg8b %1 ;	"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (*expect)		/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	"	lock; cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");
	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	lock; cmpxchg8b %0 ;	"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	lock; cmpxchg8b %0 ;	"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
	return (v);
}

static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline int
atomic_fcmpset_64(volatile uint64_t *dst, uint64_t *expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_fcmpset_64_i386(dst, expect, src));
	else
		return (atomic_fcmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}

static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
{

	for (;;) {
		uint64_t t = *p;
		if (atomic_cmpset_64(p, t, t + v))
			return (t);
	}
}

static __inline void
atomic_add_64(volatile uint64_t *p, uint64_t v)
{
	uint64_t t;

	for (;;) {
		t = *p;
		if (atomic_cmpset_64(p, t, t + v))
			break;
	}
}

static __inline void
atomic_subtract_64(volatile uint64_t *p, uint64_t v)
{
	uint64_t t;

	for (;;) {
		t = *p;
		if (atomic_cmpset_64(p, t, t - v))
			break;
	}
}
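
/*
 * Illustrative sketch, not part of this header: on a 32-bit kernel a
 * plain 64-bit assignment may be split into two 32-bit accesses, so a
 * 64-bit counter shared between CPUs should go through the helpers
 * above, which pick the cmpxchg8b or the interrupt-disabling variant
 * at run time.  "stat64", "nbytes" and "snapshot" are made up for the
 * example.
 *
 *	atomic_add_64(&stat64, nbytes);
 *	snapshot = atomic_load_acq_64(&stat64);
 */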

#endif /* _KERNEL */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline int
atomic_fcmpset_long(volatile u_long *dst, u_long *expect, u_long src)
{

	return (atomic_fcmpset_int((volatile u_int *)dst, (u_int *)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{

	return (atomic_testandclear_int((volatile u_int *)p, v));
}

/* Read the current value and store a new value in the destination. */
static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char
#define	atomic_cmpset_acq_char		atomic_cmpset_char
#define	atomic_cmpset_rel_char		atomic_cmpset_char
#define	atomic_fcmpset_acq_char		atomic_fcmpset_char
#define	atomic_fcmpset_rel_char		atomic_fcmpset_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short
#define	atomic_cmpset_acq_short		atomic_cmpset_short
#define	atomic_cmpset_rel_short		atomic_cmpset_short
#define	atomic_fcmpset_acq_short	atomic_fcmpset_short
#define	atomic_fcmpset_rel_short	atomic_fcmpset_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
#define	atomic_testandset_acq_long	atomic_testandset_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char
#define	atomic_cmpset_8		atomic_cmpset_char
#define	atomic_cmpset_acq_8	atomic_cmpset_acq_char
#define	atomic_cmpset_rel_8	atomic_cmpset_rel_char
#define	atomic_fcmpset_8	atomic_fcmpset_char
#define	atomic_fcmpset_acq_8	atomic_fcmpset_acq_char
#define	atomic_fcmpset_rel_8	atomic_fcmpset_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short
#define	atomic_cmpset_16	atomic_cmpset_short
#define	atomic_cmpset_acq_16	atomic_cmpset_acq_short
#define	atomic_cmpset_rel_16	atomic_cmpset_rel_short
#define	atomic_fcmpset_16	atomic_fcmpset_short
#define	atomic_fcmpset_acq_16	atomic_fcmpset_acq_short
#define	atomic_fcmpset_rel_16	atomic_fcmpset_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

#ifdef _KERNEL
/* Operations on 64-bit quad words. */
#define	atomic_cmpset_acq_64 atomic_cmpset_64
#define	atomic_cmpset_rel_64 atomic_cmpset_64
#define	atomic_fcmpset_acq_64 atomic_fcmpset_64
#define	atomic_fcmpset_rel_64 atomic_fcmpset_64
#define	atomic_fetchadd_acq_64	atomic_fetchadd_64
#define	atomic_fetchadd_rel_64	atomic_fetchadd_64
#define	atomic_add_acq_64 atomic_add_64
#define	atomic_add_rel_64 atomic_add_64
#define	atomic_subtract_acq_64 atomic_subtract_64
#define	atomic_subtract_rel_64 atomic_subtract_64
#define	atomic_load_64 atomic_load_acq_64
#define	atomic_store_64 atomic_store_rel_64
#endif

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_ptr(dst, old, new) \
	atomic_fcmpset_int((volatile u_int *)(dst), (u_int *)(old), (u_int)(new))
#define	atomic_fcmpset_acq_ptr(dst, old, new) \
	atomic_fcmpset_acq_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_rel_ptr(dst, old, new) \
	atomic_fcmpset_rel_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
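
/*
 * Illustrative sketch, not part of this header: the pointer variants
 * allow lock-free updates of linked structures.  A minimal
 * singly-linked push, assuming a made-up "struct item" with a "next"
 * field, a global "head" list pointer and a caller-owned "new_item":
 *
 *	struct item *old;
 *
 *	old = head;
 *	do {
 *		new_item->next = old;
 *	} while (atomic_fcmpset_ptr(&head, &old, new_item) == 0);
 *
 * Real code must also consider object lifetime and the ABA problem;
 * this only shows the calling convention.
 */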

#endif /* !WANT_FUNCTIONS */

#if defined(_KERNEL)
#define	mb()	__mbk()
#define	wmb()	__mbk()
#define	rmb()	__mbk()
#else
#define	mb()	__mbu()
#define	wmb()	__mbu()
#define	rmb()	__mbu()
#endif

#endif /* !_MACHINE_ATOMIC_H_ */