xref: /freebsd/sys/i386/include/atomic.h (revision b78ee15e9f04ae15c3e1200df974473167524d17)
/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
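
/*
 * All three barriers above are implemented with a locked add to the word at
 * the top of the stack; on x86 a locked read-modify-write instruction acts
 * as a full barrier.  See the comment above __storeload_barrier() below for
 * why a locked add is used instead of mfence.
 */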

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
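
/*
 * Illustrative sketch (not part of this header): a hypothetical per-device
 * statistics structure updated from several CPUs.  Each update is a single
 * atomic read-modify-write, so no lock is needed around the counters.
 *
 *	struct foo_stats {
 *		u_int	fs_errors;
 *		u_int	fs_flags;
 *	};
 *
 *	static void
 *	foo_record_error(struct foo_stats *st, u_int flag)
 *	{
 *
 *		atomic_add_int(&st->fs_errors, 1);
 *		atomic_set_int(&st->fs_flags, flag);
 *	}
 */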

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64(volatile uint64_t *);
void		atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is marked volatile so that the compiler cannot discard it.
 * GCC also reorders operations aggressively, so the barrier variants add a
 * memory clobber to prevent such reordering across them.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

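/*
 * For example, the invocation ATOMIC_ASM(add, int, "addl %1,%0", "ir", v)
 * near the end of this file expands to atomic_add_int() and
 * atomic_add_barr_int(); the latter differs only in its "memory" clobber and
 * backs the acquire/release aliases defined below.
 */
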
/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == expect) *dst = src (operates on 32-bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%1 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"       sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (expect)			/* 3 */
	: "memory");
	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"       sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */

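/*
 * Illustrative sketch (not part of this header): an arbitrary read-modify-
 * write is usually built on atomic_cmpset_int() as a retry loop that
 * recomputes the new value whenever another CPU changed *refs first.  The
 * "acquire a reference only while active" pattern below is a hypothetical
 * example.
 *
 *	static __inline int
 *	example_ref_acquire_if_active(volatile u_int *refs)
 *	{
 *		u_int old;
 *
 *		do {
 *			old = *refs;
 *			if (old == 0)
 *				return (0);
 *		} while (atomic_cmpset_int(refs, old, old + 1) == 0);
 *		return (1);
 *	}
 */
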
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

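/*
 * Illustrative sketch (not part of this header): because the previous value
 * is returned, atomic_fetchadd_int() can hand out unique values, e.g. a
 * hypothetical ticket dispenser shared by several CPUs.
 *
 *	static __inline u_int
 *	example_next_ticket(volatile u_int *counter)
 *	{
 *
 *		return (atomic_fetchadd_int(counter, 1));
 *	}
 */
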
static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

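/*
 * Illustrative sketch (not part of this header): atomic_testandset_int()
 * sets bit (v % 32) and reports whether it was already set, which is enough
 * to build a simple try-lock on a hypothetical flag word.
 *
 *	#define	EXAMPLE_BUSY_BIT	0
 *
 *	static __inline int
 *	example_try_lock(volatile u_int *flags)
 *	{
 *
 *		return (atomic_testandset_int(flags, EXAMPLE_BUSY_BIT) == 0);
 *	}
 */
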
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so for atomic_load_acq we introduce a Store/Load barrier
 * before the load in SMP kernels.  We use "lock addl $0,mem", as
 * recommended by the AMD Software Optimization Guide, and not mfence.
 * In the kernel, we use a private per-cpu cache line as the target
 * for the locked addition, to avoid introducing false data
 * dependencies.  In userspace, a word at the top of the stack is
 * utilized.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */
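
/*
 * Illustrative sketch (not part of this header): the typical use of the
 * acquire/release pair defined below is handing data from one CPU to
 * another through a flag on a hypothetical shared structure.
 *
 *	Producer:
 *		shared->data = value;
 *		atomic_store_rel_int(&shared->ready, 1);
 *
 *	Consumer:
 *		while (atomic_load_acq_int(&shared->ready) == 0)
 *			continue;
 *		value = shared->data;
 *
 * The release store orders the write of data before the write of the flag;
 * the acquire load orders the read of the flag before the read of data.
 */
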
#if defined(_KERNEL)

/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x180

#if defined(SMP)
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,%%fs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
}
#else /* _KERNEL && UP */
static __inline void
__storeload_barrier(void)
{

	__compiler_membar();
}
#endif /* SMP */
#else /* !_KERNEL */
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif /* _KERNEL */

/*
 * C11-standard acquire/release semantics only guarantee ordering when the
 * acquire and the release operate on the same variable.  However, our
 * previous (x86) implementations provided much stronger ordering than
 * required (essentially what C11 calls seq_cst order).  This implementation
 * keeps that historical strong ordering, since some callers depend on it.
 */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	__storeload_barrier();					\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*q),			/* 1 */
	  "m" (*(q + 1))		/* 2 */
	: "memory");
	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*q),			/* 0 */
	  "=m" (*(q + 1))		/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%4,%2 ;		"
	"	movl	%3,%1 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchg8b %1 ;		"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");
	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
	return (v);
}

static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}
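
/*
 * Illustrative sketch (not part of this header): the 64-bit operations above
 * dispatch at run time on CPUID_CX8, so callers simply use atomic_*_64().
 * For example, a hypothetical 64-bit event counter could be updated with a
 * cmpset loop on a 32-bit kernel:
 *
 *	static __inline void
 *	example_add64(volatile uint64_t *p, uint64_t v)
 *	{
 *		uint64_t old;
 *
 *		do {
 *			old = atomic_load_acq_64(p);
 *		} while (atomic_cmpset_64(p, old, old + v) == 0);
 *	}
 */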

#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE

#ifndef WANT_FUNCTIONS

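/*
 * On i386, u_long and pointers are 32 bits wide, so the long and pointer
 * operations below are implemented in terms of the int primitives.
 */
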
static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
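
/*
 * Illustrative sketch (not part of this header): atomic_readandclear_int()
 * atomically drains a word, e.g. collecting pending event bits that other
 * CPUs set with atomic_set_int() on a hypothetical flag word.
 *
 *	static __inline u_int
 *	example_collect_events(volatile u_int *pending)
 *	{
 *
 *		return (atomic_readandclear_int(pending));
 *	}
 */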

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
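
/*
 * Illustrative sketch (not part of this header): atomic_cmpset_ptr() is the
 * usual building block for lock-free pointer updates, e.g. pushing onto a
 * hypothetical singly-linked stack of struct example_node.
 *
 *	static __inline void
 *	example_push(struct example_node * volatile *head,
 *	    struct example_node *n)
 *	{
 *
 *		do {
 *			n->e_next = *head;
 *		} while (atomic_cmpset_ptr(head, n->e_next, n) == 0);
 *	}
 */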

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */