/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

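/*
 * Illustrative sketch only, not part of this interface: one plausible way a
 * caller might use the simple operations listed above on a hypothetical flag
 * word and event counter.  The variable and function names are invented for
 * the example.
 */
#if 0	/* example, not compiled */
static volatile u_int example_flags;
static volatile u_int example_count;

static __inline void
example_basic_ops(void)
{

	atomic_set_int(&example_flags, 0x1);	/* set a flag bit */
	atomic_clear_int(&example_flags, 0x1);	/* clear it again */
	atomic_add_int(&example_count, 1);	/* bump the counter */
	atomic_subtract_int(&example_count, 1);	/* and drop it back */
}
#endif
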
/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64(volatile uint64_t *);
void		atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized to avoid code chunk removal by the compiler.
 * GCC aggressively reorders operations and memory clobbering is necessary
 * in order to avoid that for memory barriers.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == expect) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%1 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (expect)			/* 3 */
	: "memory");
	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */

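/*
 * Illustrative sketch only, not part of this interface: a minimal retry loop
 * built on atomic_cmpset_int(), assuming a hypothetical word that is claimed
 * by moving it from 0 to 1.  Real lock primitives live elsewhere; this only
 * shows the call convention (0 is returned on failure, non-zero on success).
 */
#if 0	/* example, not compiled */
static __inline void
example_claim_word(volatile u_int *w)
{

	while (atomic_cmpset_int(w, 0, 1) == 0)
		;		/* spin until the 0 -> 1 transition wins */
}
#endif
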
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

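/*
 * Illustrative sketch only, not part of this interface: handing out unique,
 * increasing ticket numbers from a hypothetical shared counter with
 * atomic_fetchadd_int(), which returns the pre-increment value.
 */
#if 0	/* example, not compiled */
static __inline u_int
example_next_ticket(volatile u_int *counter)
{

	return (atomic_fetchadd_int(counter, 1));
}
#endif
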
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so for atomic_load_acq we introduce a Store/Load barrier
 * before the load in SMP kernels.  We use "lock addl $0,mem", as
 * recommended by the AMD Software Optimization Guide, and not mfence.
 * In the kernel, we use a private per-cpu cache line as the target
 * for the locked addition, to avoid introducing false data
 * dependencies.  In userspace, a word at the top of the stack is
 * utilized.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */
#if defined(_KERNEL)

/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x180

#if defined(SMP)
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,%%fs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
}
#else /* _KERNEL && UP */
static __inline void
__storeload_barrier(void)
{

	__compiler_membar();
}
#endif /* SMP */
#else /* !_KERNEL */
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif /* _KERNEL*/

/*
 * C11-standard acq/rel semantics only apply when the variable in the
 * call is the same for acq as it is for rel.  However, our previous
 * (x86) implementations provided much stronger ordering than required
 * (essentially what is called seq_cst order in C11).  This
 * implementation provides the historical strong ordering since some
 * callers depend on it.
 */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	__storeload_barrier();					\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

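/*
 * Illustrative sketch only, not part of this interface: the usual
 * producer/consumer pairing of a release store with an acquire load, using
 * the atomic_store_rel_int()/atomic_load_acq_int() variants generated from
 * the templates above.  The variables and functions are hypothetical.
 */
#if 0	/* example, not compiled */
static volatile u_int example_data;
static volatile u_int example_ready;

static __inline void
example_publish(u_int v)
{

	example_data = v;
	/* Release: the write of example_data is visible before this. */
	atomic_store_rel_int(&example_ready, 1);
}

static __inline int
example_consume(u_int *vp)
{

	/* Acquire: reads below cannot be reordered before this load. */
	if (atomic_load_acq_int(&example_ready) == 0)
		return (0);
	*vp = example_data;
	return (1);
}
#endif
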
#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*q),			/* 1 */
	  "m" (*(q + 1))		/* 2 */
	: "memory");
	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*q),			/* 0 */
	  "=m" (*(q + 1))		/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%4,%2 ;		"
	"	movl	%3,%1 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchg8b %1 ;		"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");
	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
	return (v);
}

static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}

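/*
 * Illustrative sketch only, not part of this interface: since no 64-bit
 * fetchadd is provided here, a hypothetical 64-bit counter can be advanced
 * with an atomic_cmpset_64() retry loop; the CPUID_CX8 dispatch above picks
 * the i386 or i586 implementation automatically.
 */
#if 0	/* example, not compiled */
static __inline void
example_add_64(volatile uint64_t *p, uint64_t inc)
{
	uint64_t old;

	do {
		old = atomic_load_acq_64(p);
	} while (atomic_cmpset_64(p, old, old + inc) == 0);
}
#endif
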
#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

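/*
 * Illustrative sketch only, not part of this interface: draining a
 * hypothetical mask of pending event bits in a single step by atomically
 * exchanging it with zero, which is also how the atomic_readandclear_*()
 * macros below are implemented.
 */
#if 0	/* example, not compiled */
static __inline u_int
example_drain_pending(volatile u_int *pending)
{

	return (atomic_swap_int(pending, 0));
}
#endif
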
#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */