/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
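
/*
 * Illustrative sketch, not part of this header: the primitives listed
 * above compose into the usual reference-count pattern.  Kept under
 * #if 0; the type "struct obj" and the names obj_hold/obj_drop/obj_free
 * are hypothetical.
 */
#if 0
struct obj {
	volatile u_int	refcnt;
	/* ... */
};

void	obj_free(struct obj *);		/* hypothetical destructor */

static __inline void
obj_hold(struct obj *o)
{

	atomic_add_int(&o->refcnt, 1);	/* atomic even against other CPUs */
}

static __inline void
obj_drop(struct obj *o)
{

	/* atomic_fetchadd_int() returns the value *before* the add. */
	if (atomic_fetchadd_int(&o->refcnt, -1) == 1)
		obj_free(o);		/* last reference just went away */
}
#endif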

#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64(volatile uint64_t *);
void		atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The asm statements are marked volatile so that the compiler cannot
 * discard them as dead code.  GCC also reorders operations
 * aggressively, so the barrier ("_barr_") variants add a memory
 * clobber to keep memory accesses from migrating across them.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack
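
/*
 * Illustrative sketch, not part of this header: what one instantiation,
 * ATOMIC_ASM(add, int, "addl %1,%0", "ir", v), expands to, written out
 * by hand.  Kept under #if 0 to avoid redefinition.
 */
#if 0
static __inline void
atomic_add_int(volatile u_int *p, u_int v)
{
	__asm __volatile(MPLOCKED "addl %1,%0"
	: "+m" (*p)		/* the target word, read and written */
	: "ir" (v)		/* immediate or register operand */
	: "cc");		/* addl clobbers the flags */
}

static __inline void
atomic_add_barr_int(volatile u_int *p, u_int v)
{
	__asm __volatile(MPLOCKED "addl %1,%0"
	: "+m" (*p)
	: "ir" (v)
	: "memory", "cc");	/* "memory" makes this a compiler barrier */
}
#endif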

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == expect) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%1 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (expect)			/* 3 */
	: "memory");
	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */
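
/*
 * Illustrative sketch, not part of this header: atomic_cmpset is the
 * building block for mutex-style locking, as the comment above notes.
 * A minimal spinlock; kept under #if 0, the names spin_lock/spin_unlock
 * and "lk" are hypothetical, and the _acq/_rel variants used here are
 * aliased to the plain operations later in this file.
 */
#if 0
static __inline void
spin_lock(volatile u_int *lk)
{

	/* Try to change 0 (unlocked) to 1 (locked); retry on failure. */
	while (atomic_cmpset_acq_int(lk, 0, 1) == 0)
		;	/* spin */
}

static __inline void
spin_unlock(volatile u_int *lk)
{

	atomic_store_rel_int(lk, 0);	/* release: publish critical section */
}
#endif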

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so for atomic_load_acq we introduce a Store/Load barrier
 * before the load in SMP kernels.  We use "lock addl $0,mem", as
 * recommended by the AMD Software Optimization Guide, and not mfence.
 * In the kernel, we use a private per-cpu cache line as the target
 * for the locked addition, to avoid introducing false data
 * dependencies.  In userspace, a word at the top of the stack is
 * utilized.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */
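
/*
 * Illustrative sketch, not part of this header: the acquire/release
 * pairing described above, used to hand one word of data between two
 * threads.  Kept under #if 0; the names msg/msg_ready/produce/consume
 * are hypothetical, and the int variants used are instantiated later
 * in this file.
 */
#if 0
static u_int msg;
static volatile u_int msg_ready;

static void
produce(u_int v)
{

	msg = v;				/* plain store */
	atomic_store_rel_int(&msg_ready, 1);	/* release: msg visible first */
}

static u_int
consume(void)
{

	while (atomic_load_acq_int(&msg_ready) == 0)
		;	/* acquire: the read of msg cannot pass this load */
	return (msg);
}
#endif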

#if defined(_KERNEL)

/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x180

#if defined(SMP)
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,%%fs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
}
#else /* _KERNEL && UP */
static __inline void
__storeload_barrier(void)
{

	__compiler_membar();
}
#endif /* SMP */
#else /* !_KERNEL */
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	__storeload_barrier();					\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack
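
/*
 * Illustrative sketch, not part of this header: what ATOMIC_LOAD(int)
 * and ATOMIC_STORE(int) expand to, written out by hand.  Kept under
 * #if 0 to avoid redefinition.
 */
#if 0
static __inline u_int
atomic_load_acq_int(volatile u_int *p)
{
	u_int res;

	__storeload_barrier();	/* keep the load from passing prior stores */
	res = *p;
	__compiler_membar();	/* keep later accesses after the load */
	return (res);
}

static __inline void
atomic_store_rel_int(volatile u_int *p, u_int v)
{

	__compiler_membar();	/* keep earlier accesses before the store */
	*p = v;			/* a plain store has release semantics */
}
#endif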

#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/*
 * The i486 does not support SMP or CMPXCHG8B, so the _i386 variants
 * fall back to briefly disabling interrupts around the access.
 */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*q),			/* 1 */
	  "m" (*(q + 1))		/* 2 */
	: "memory");
	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*q),			/* 0 */
	  "=m" (*(q + 1))		/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%4,%2 ;		"
	"	movl	%3,%1 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}
: "r" ((uint32_t)v), /* 3 */ 4045188b5f3SJung-uk Kim "r" ((uint32_t)(v >> 32))); /* 4 */ 4055188b5f3SJung-uk Kim return (res); 4065188b5f3SJung-uk Kim } 4075188b5f3SJung-uk Kim 4085188b5f3SJung-uk Kim static __inline int 4095188b5f3SJung-uk Kim atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src) 4105188b5f3SJung-uk Kim { 4115188b5f3SJung-uk Kim u_char res; 4125188b5f3SJung-uk Kim 4135188b5f3SJung-uk Kim __asm __volatile( 4145188b5f3SJung-uk Kim " " MPLOCKED " " 4155188b5f3SJung-uk Kim " cmpxchg8b %1 ; " 4165188b5f3SJung-uk Kim " sete %0" 4175188b5f3SJung-uk Kim : "=q" (res), /* 0 */ 4185188b5f3SJung-uk Kim "+m" (*dst), /* 1 */ 4195188b5f3SJung-uk Kim "+A" (expect) /* 2 */ 4205188b5f3SJung-uk Kim : "b" ((uint32_t)src), /* 3 */ 4215188b5f3SJung-uk Kim "c" ((uint32_t)(src >> 32)) /* 4 */ 4225188b5f3SJung-uk Kim : "memory", "cc"); 4235188b5f3SJung-uk Kim return (res); 4245188b5f3SJung-uk Kim } 4255188b5f3SJung-uk Kim 4265188b5f3SJung-uk Kim static __inline uint64_t 4273264fd70SJung-uk Kim atomic_load_acq_64_i586(volatile uint64_t *p) 4283264fd70SJung-uk Kim { 4293264fd70SJung-uk Kim uint64_t res; 4303264fd70SJung-uk Kim 4313264fd70SJung-uk Kim __asm __volatile( 4323264fd70SJung-uk Kim " movl %%ebx,%%eax ; " 4333264fd70SJung-uk Kim " movl %%ecx,%%edx ; " 4343264fd70SJung-uk Kim " " MPLOCKED " " 4353264fd70SJung-uk Kim " cmpxchg8b %1" 4363264fd70SJung-uk Kim : "=&A" (res), /* 0 */ 4373264fd70SJung-uk Kim "+m" (*p) /* 1 */ 4383264fd70SJung-uk Kim : : "memory", "cc"); 4393264fd70SJung-uk Kim return (res); 4403264fd70SJung-uk Kim } 4413264fd70SJung-uk Kim 4423264fd70SJung-uk Kim static __inline void 4433264fd70SJung-uk Kim atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v) 4443264fd70SJung-uk Kim { 4453264fd70SJung-uk Kim 4463264fd70SJung-uk Kim __asm __volatile( 4473264fd70SJung-uk Kim " movl %%eax,%%ebx ; " 4483264fd70SJung-uk Kim " movl %%edx,%%ecx ; " 4493264fd70SJung-uk Kim "1: " 4503264fd70SJung-uk Kim " " MPLOCKED " " 4513264fd70SJung-uk Kim " cmpxchg8b %0 ; " 4523264fd70SJung-uk Kim " jne 1b" 4533264fd70SJung-uk Kim : "+m" (*p), /* 0 */ 4543264fd70SJung-uk Kim "+A" (v) /* 1 */ 4553264fd70SJung-uk Kim : : "ebx", "ecx", "memory", "cc"); 4563264fd70SJung-uk Kim } 4573264fd70SJung-uk Kim 4583264fd70SJung-uk Kim static __inline uint64_t 4595188b5f3SJung-uk Kim atomic_swap_64_i586(volatile uint64_t *p, uint64_t v) 4605188b5f3SJung-uk Kim { 4615188b5f3SJung-uk Kim 4625188b5f3SJung-uk Kim __asm __volatile( 4635188b5f3SJung-uk Kim " movl %%eax,%%ebx ; " 4645188b5f3SJung-uk Kim " movl %%edx,%%ecx ; " 4655188b5f3SJung-uk Kim "1: " 4665188b5f3SJung-uk Kim " " MPLOCKED " " 4675188b5f3SJung-uk Kim " cmpxchg8b %0 ; " 4685188b5f3SJung-uk Kim " jne 1b" 4695188b5f3SJung-uk Kim : "+m" (*p), /* 0 */ 4705188b5f3SJung-uk Kim "+A" (v) /* 1 */ 4715188b5f3SJung-uk Kim : : "ebx", "ecx", "memory", "cc"); 4725188b5f3SJung-uk Kim return (v); 4735188b5f3SJung-uk Kim } 4745188b5f3SJung-uk Kim 4755188b5f3SJung-uk Kim static __inline int 4765188b5f3SJung-uk Kim atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src) 4775188b5f3SJung-uk Kim { 4785188b5f3SJung-uk Kim 4795188b5f3SJung-uk Kim if ((cpu_feature & CPUID_CX8) == 0) 4805188b5f3SJung-uk Kim return (atomic_cmpset_64_i386(dst, expect, src)); 4815188b5f3SJung-uk Kim else 4825188b5f3SJung-uk Kim return (atomic_cmpset_64_i586(dst, expect, src)); 4835188b5f3SJung-uk Kim } 4845188b5f3SJung-uk Kim 4855188b5f3SJung-uk Kim static __inline uint64_t 4863264fd70SJung-uk Kim atomic_load_acq_64(volatile uint64_t *p) 

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}

#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE
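
/*
 * Illustrative sketch, not part of this header: reading a 64-bit value
 * on a 32-bit CPU without a torn read, using the kernel-only 64-bit
 * interface above.  Kept under #if 0; the counter name is hypothetical.
 */
#if 0
static volatile uint64_t example_counter;	/* hypothetical */

static __inline uint64_t
example_counter_snapshot(void)
{

	/*
	 * Dispatches at run time on CPUID_CX8: cmpxchg8b on i586 and
	 * newer, interrupt-disable fallback on the i486.
	 */
	return (atomic_load_acq_64(&example_counter));
}
#endif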

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */
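
/*
 * Illustrative sketch, not part of this header: atomic_swap_int() lets
 * a consumer drain a word of posted event bits in one step (this is
 * exactly what atomic_readandclear_int(), defined below, does).  Kept
 * under #if 0; the names "pending" and events_drain are hypothetical.
 */
#if 0
static volatile u_int pending;		/* producers OR bits in here */

static __inline u_int
events_drain(void)
{

	/* Grab all currently posted bits and reset the word to zero. */
	return (atomic_swap_int(&pending, 0));
}
#endif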

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
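
/*
 * Illustrative sketch, not part of this header: atomic_cmpset_ptr()
 * above supports simple lock-free structures, e.g. pushing onto a
 * singly linked stack.  Kept under #if 0; the "node" type and names
 * are hypothetical, and a real implementation must also consider the
 * ABA problem on the pop side.
 */
#if 0
struct node {
	struct node	*next;
};

static __inline void
stack_push(struct node * volatile *head, struct node *n)
{
	struct node *old;

	do {
		old = *head;
		n->next = old;		/* link before publishing */
	} while (atomic_cmpset_ptr(head, old, n) == 0);
}
#endif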

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */