/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
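
/*
 * Illustrative use only (not part of this header): a subsystem might keep
 * a shared event counter and flag word with these primitives, e.g. count
 * an event, mark work pending, then later drain the counter and clear the
 * flag.  The names sc_events, sc_flags and SC_PENDING are hypothetical.
 *
 *	atomic_add_int(&sc_events, 1);
 *	atomic_set_int(&sc_flags, SC_PENDING);
 *	...
 *	if (atomic_readandclear_int(&sc_events) != 0)
 *		atomic_clear_int(&sc_flags, SC_PENDING);
 */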

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64(volatile uint64_t *);
void		atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized so that the compiler cannot discard it.
 * GCC aggressively reorders operations, so the "memory" clobber is
 * required on the barrier variants to keep memory accesses from being
 * reordered across them.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

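/*
 * For illustration only: ATOMIC_ASM(add, int, "addl %1,%0", "ir", v)
 * expands to roughly the following function (the "_barr_" variant
 * differs only in the additional "memory" clobber):
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile(MPLOCKED "addl %1,%0"
 *		: "+m" (*p)
 *		: "ir" (v)
 *		: "cc");
 *	}
 */
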
/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == expect) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%1 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"       sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (expect)			/* 3 */
	: "memory");
	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"       sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */
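
/*
 * Typical use of atomic_cmpset_int() (illustrative sketch only, not an
 * interface provided by this header): retry a read-modify-write until no
 * other CPU updates the word in between.
 *
 *	static __inline void
 *	example_add_capped(volatile u_int *p, u_int v, u_int cap)
 *	{
 *		u_int old, new;
 *
 *		do {
 *			old = *p;
 *			new = old + v;
 *			if (new > cap || new < old)
 *				new = cap;
 *		} while (atomic_cmpset_int(p, old, new) == 0);
 *	}
 */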

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
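
/*
 * Example (illustrative only): since atomic_fetchadd_int() returns the
 * value *p held before the addition, it can hand out unique ticket
 * numbers; "next_ticket" is a hypothetical variable.
 *
 *	my_ticket = atomic_fetchadd_int(&next_ticket, 1);
 */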

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}
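
/*
 * Example (sketch only): atomic_testandset_int() returns the previous
 * state of bit (v % 32), so a trivial try-lock can be layered on top of
 * it.  The names below are hypothetical and not part of this header.
 *
 *	if (atomic_testandset_int(&example_busy, 0) == 0) {
 *		... bit 0 was clear before, so we now own the resource ...
 *		atomic_clear_int(&example_busy, 1);
 *	}
 */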

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  In the kernel, we use a private per-cpu
 * cache line as the target for the locked addition, to avoid
 * introducing false data dependencies.  In userspace, a word at the
 * top of the stack is used instead.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */
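
/*
 * To illustrate the Store/Load requirement (sketch only, hypothetical
 * flag variables): with nothing but release stores and acquire loads,
 * each CPU's load below may be satisfied before its own store becomes
 * visible to the other CPU, so both can observe 0 and proceed.  Placing
 * atomic_thread_fence_seq_cst() between the store and the load on each
 * CPU rules that outcome out.
 *
 *	CPU 0:					CPU 1:
 *	atomic_store_rel_int(&flag0, 1);	atomic_store_rel_int(&flag1, 1);
 *	atomic_thread_fence_seq_cst();		atomic_thread_fence_seq_cst();
 *	if (atomic_load_acq_int(&flag1) == 0)	if (atomic_load_acq_int(&flag0) == 0)
 *		(critical section)			(critical section)
 */
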
#if defined(_KERNEL)

/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x180

#if defined(SMP)
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,%%fs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
}
#else /* _KERNEL && UP */
static __inline void
__storeload_barrier(void)
{

	__compiler_membar();
}
#endif /* SMP */
#else /* !_KERNEL */
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack
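
/*
 * Acquire/release pairing example (illustrative only, hypothetical
 * variables): a consumer that observes "ready" != 0 through an acquire
 * load is guaranteed to also see the producer's earlier write to "data".
 *
 *	producer:				consumer:
 *	data = 42;				while (atomic_load_acq_int(&ready) == 0)
 *	atomic_store_rel_int(&ready, 1);		;
 *						use(data);
 */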

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*q),			/* 1 */
	  "m" (*(q + 1))		/* 2 */
	: "memory");
	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*q),			/* 0 */
	  "=m" (*(q + 1))		/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%4,%2 ;		"
	"	movl	%3,%1 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchg8b %1 ;		"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");
	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
	return (v);
}

static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}
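
/*
 * Example (sketch only, helper name hypothetical): a lock-free 64-bit
 * counter update built on the run-time dispatching primitives above.
 *
 *	static __inline void
 *	example_counter64_add(volatile uint64_t *p, uint64_t v)
 *	{
 *		uint64_t old;
 *
 *		do {
 *			old = atomic_load_acq_64(p);
 *		} while (atomic_cmpset_64(p, old, old + v) == 0);
 *	}
 */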

#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

/*
 * Read the current value and store a new value in the destination.
 * No explicit lock prefix is needed here: xchg with a memory operand
 * is implicitly locked.
 */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
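
/*
 * Example (sketch only, all names hypothetical): the pointer operations
 * allow simple lock-free structures, e.g. pushing onto a singly-linked
 * list head.
 *
 *	struct example_node { struct example_node *next; };
 *
 *	static __inline void
 *	example_push(struct example_node * volatile *head,
 *	    struct example_node *n)
 *	{
 *
 *		do {
 *			n->next = *head;
 *		} while (atomic_cmpset_ptr(head, n->next, n) == 0);
 *	}
 */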

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */