/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
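
/*
 * Illustrative sketch (not part of this interface): maintaining a
 * pending-event mask with the operations above.  The identifiers
 * "pending", "PEND_NET" and "PEND_CLOCK" below are hypothetical.
 *
 *	static volatile u_int pending;
 *
 *	atomic_set_int(&pending, PEND_NET);	// post an event
 *	atomic_clear_int(&pending, PEND_CLOCK);	// retract one
 *
 *	// Atomically harvest and reset the whole mask.  A bit set
 *	// concurrently by another CPU lands either in this harvest
 *	// or in the next one, but is never lost.
 *	u_int mask = atomic_readandclear_int(&pending);
 */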

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
int atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src);

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p);		\
void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE */

#ifdef __GNUC__

/*
 * For userland, assume the SMP case and use lock prefixes so that
 * the binaries will run on both types of systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	lock ;
#else
#define	MPLOCKED
#endif

/*
 * The assembly is marked volatile to flag the potential side effects
 * should an interrupt or an SMP collision occur, which keeps the
 * compiler from reordering or eliding the operation.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(__XSTRING(MPLOCKED) OP		\
	: "+m" (*p)					\
	: CONS (V));					\
}							\
struct __hack

#else /* !__GNUC__ */

#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)				\
extern void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#endif /* __GNUC__ */

/*
 * Atomic compare and set, used by the mutex functions.
 *
 *	if (*dst == exp) *dst = src
 *
 * (32-bit int and 64-bit long versions.)
 *
 * Returns 0 on failure, non-zero on success.
 */

#if defined(__GNUC__)

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	int res = exp;

	__asm __volatile (
	"	" __XSTRING(MPLOCKED) "	"
	"	cmpxchgl %1,%2 ;	"
	"	setz	%%al ;		"
	"	movzbl	%%al,%0 ;	"
	"1:				"
	"# atomic_cmpset_int"
	: "+a" (res)			/* 0 (result) */
	: "r" (src),			/* 1 */
	  "m" (*(dst))			/* 2 */
	: "memory");

	return (res);
}

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src)
{
	long res = exp;

	__asm __volatile (
	"	" __XSTRING(MPLOCKED) "	"
	"	cmpxchgq %1,%2 ;	"
	"	setz	%%al ;		"
	"	movzbq	%%al,%0 ;	"
	"1:				"
	"# atomic_cmpset_long"
	: "+a" (res)			/* 0 (result) */
	: "r" (src),			/* 1 */
	  "m" (*(dst))			/* 2 */
	: "memory");

	return (res);
}
#endif /* defined(__GNUC__) */
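
/*
 * Illustrative sketch (not part of this interface): a minimal spin
 * lock built on compare-and-set, in the style of the mutex functions
 * mentioned above.  "example_mtx", "example_lock" and "example_unlock"
 * are hypothetical names.
 *
 *	static volatile u_int example_mtx;	// 0 == unlocked
 *
 *	static __inline void
 *	example_lock(void)
 *	{
 *		// Spin until the 0 -> 1 transition succeeds; the
 *		// acquire variant keeps later accesses from being
 *		// reordered above the lock acquisition.
 *		while (atomic_cmpset_acq_int(&example_mtx, 0, 1) == 0)
 *			;
 *	}
 *
 *	static __inline void
 *	example_unlock(void)
 *	{
 *		// The release store keeps earlier accesses from
 *		// being reordered below the unlock.
 *		atomic_store_rel_int(&example_mtx, 0);
 *	}
 */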

#if defined(__GNUC__)

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(__XSTRING(MPLOCKED) LOP	\
	: "=a" (res),			/* 0 (result) */\
	  "+m" (*p)			/* 1 */		\
	: : "memory");					\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK implicitly, so	\
 * the store needs no MPLOCKED prefix.			\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
	: "+m" (*p),			/* 0 */		\
	  "+r" (v)			/* 1 */		\
	: : "memory");					\
}							\
struct __hack

#else /* !defined(__GNUC__) */

extern int atomic_cmpset_int(volatile u_int *, u_int, u_int);
extern int atomic_cmpset_long(volatile u_long *, u_long, u_long);

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)				\
extern u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p);		\
extern void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#endif /* defined(__GNUC__) */

#endif /* KLD_MODULE */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgq %0,%1",  "xchgq %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD
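
/*
 * For reference, a sketch of what one invocation above expands to in
 * the statically-linked SMP kernel case (!KLD_MODULE, __GNUC__, SMP),
 * modulo whitespace:
 *
 *	ATOMIC_ASM(add, int, "addl %1,%0", "ir", v);
 *
 * becomes roughly
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; addl %1,%0"
 *		: "+m" (*p)
 *		: "ir" (v));
 *	}
 *
 * (The trailing "struct __hack" in the macro merely consumes the
 * semicolon at the invocation site.)
 */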

#define	atomic_set_acq_char		atomic_set_char
#define	atomic_set_rel_char		atomic_set_char
#define	atomic_clear_acq_char		atomic_clear_char
#define	atomic_clear_rel_char		atomic_clear_char
#define	atomic_add_acq_char		atomic_add_char
#define	atomic_add_rel_char		atomic_add_char
#define	atomic_subtract_acq_char	atomic_subtract_char
#define	atomic_subtract_rel_char	atomic_subtract_char

#define	atomic_set_acq_short		atomic_set_short
#define	atomic_set_rel_short		atomic_set_short
#define	atomic_clear_acq_short		atomic_clear_short
#define	atomic_clear_rel_short		atomic_clear_short
#define	atomic_add_acq_short		atomic_add_short
#define	atomic_add_rel_short		atomic_add_short
#define	atomic_subtract_acq_short	atomic_subtract_short
#define	atomic_subtract_rel_short	atomic_subtract_short

#define	atomic_set_acq_int		atomic_set_int
#define	atomic_set_rel_int		atomic_set_int
#define	atomic_clear_acq_int		atomic_clear_int
#define	atomic_clear_rel_int		atomic_clear_int
#define	atomic_add_acq_int		atomic_add_int
#define	atomic_add_rel_int		atomic_add_int
#define	atomic_subtract_acq_int		atomic_subtract_int
#define	atomic_subtract_rel_int		atomic_subtract_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_long
#define	atomic_set_rel_long		atomic_set_long
#define	atomic_clear_acq_long		atomic_clear_long
#define	atomic_clear_rel_long		atomic_clear_long
#define	atomic_add_acq_long		atomic_add_long
#define	atomic_add_rel_long		atomic_add_long
#define	atomic_subtract_acq_long	atomic_subtract_long
#define	atomic_subtract_rel_long	atomic_subtract_long

#define	atomic_cmpset_acq_ptr		atomic_cmpset_ptr
#define	atomic_cmpset_rel_ptr		atomic_cmpset_ptr

#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int

#if !defined(WANT_FUNCTIONS)
static __inline int
atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
{

	return (atomic_cmpset_long((volatile u_long *)dst,
	    (u_long)exp, (u_long)src));
}
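
/*
 * Illustrative sketch (not part of this interface): a lock-free LIFO
 * push built on atomic_cmpset_ptr.  "struct example_node" and
 * "example_head" are hypothetical, and the sketch ignores the ABA
 * problem a matching pop would have to handle.
 *
 *	struct example_node {
 *		struct example_node *next;
 *	};
 *	static struct example_node * volatile example_head;
 *
 *	static __inline void
 *	example_push(struct example_node *n)
 *	{
 *		struct example_node *old;
 *
 *		// Retry until no other CPU has changed the head
 *		// between our read and our compare-and-set.
 *		do {
 *			old = example_head;
 *			n->next = old;
 *		} while (atomic_cmpset_ptr(&example_head, old, n) == 0);
 *	}
 */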

static __inline void *
atomic_load_acq_ptr(volatile void *p)
{
	/*
	 * The apparently-bogus cast to intptr_t in the following is to
	 * avoid a warning from "gcc -Wbad-function-cast".
	 */
	return ((void *)(intptr_t)atomic_load_acq_long((volatile u_long *)p));
}

static __inline void
atomic_store_rel_ptr(volatile void *p, void *v)
{
	atomic_store_rel_long((volatile u_long *)p, (u_long)v);
}

#define	ATOMIC_PTR(NAME)				\
static __inline void					\
atomic_##NAME##_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_long((volatile u_long *)p, v);	\
}							\
							\
static __inline void					\
atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_acq_long((volatile u_long *)p, v);\
}							\
							\
static __inline void					\
atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_rel_long((volatile u_long *)p, v);\
}

ATOMIC_PTR(set)
ATOMIC_PTR(clear)
ATOMIC_PTR(add)
ATOMIC_PTR(subtract)

#undef ATOMIC_PTR

#if defined(__GNUC__)

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int result;

	__asm __volatile (
	"	xorl	%0,%0 ;		"
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "=&r" (result)		/* 0 (result) */
	: "m" (*addr));			/* 1 (addr) */

	return (result);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long result;

	__asm __volatile (
	"	xorq	%0,%0 ;		"
	"	xchgq	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "=&r" (result)		/* 0 (result) */
	: "m" (*addr));			/* 1 (addr) */

	return (result);
}

#else /* !defined(__GNUC__) */

extern u_long	atomic_readandclear_long(volatile u_long *);
extern u_int	atomic_readandclear_int(volatile u_int *);

#endif /* defined(__GNUC__) */

#endif /* !defined(WANT_FUNCTIONS) */
#endif /* ! _MACHINE_ATOMIC_H_ */