/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
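/*
 * Illustrative sketch only (never compiled): how a caller might use the
 * operations documented above.  The flag word and function names below
 * are hypothetical, not part of this header.
 */
#if 0
static volatile u_int example_pending;		/* hypothetical flag word */

static void
example_mark_pending(u_int bit)
{
	atomic_set_int(&example_pending, bit);	/* *p |= bit, atomically */
}

static u_int
example_collect_pending(void)
{
	/* Atomically fetch the accumulated bits and reset them to zero. */
	return (atomic_readandclear_int(&example_pending));
}
#endif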
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
int	atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long	atomic_fetchadd_long(volatile u_long *p, u_long v);

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized to demarcate potential before-and-after side
 * effects if an interrupt or SMP collision were to occur.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p));				\
}							\
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == exp) *dst = src (all 32 and 64 bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"	sete	%0 ;		"
	"1:				"
	"# atomic_cmpset_int"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");

	return (res);
}

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgq %2,%1 ;	"
	"	sete	%0 ;		"
	"1:				"
	"# atomic_cmpset_long"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");

	return (res);
}

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p));			/* 2 */

	return (v);
}

/*
 * Atomically add the value of v to the long integer pointed to by p and
 * return the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddq	%0, %1 ;	"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p));			/* 2 */

	return (v);
}
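/*
 * Illustrative sketch only (never compiled): a trivial test-and-set spin
 * loop built from atomic_cmpset_int, and a shared ticket counter built
 * from atomic_fetchadd_int.  All names below are hypothetical.
 */
#if 0
static volatile u_int example_lock;	/* 0 = free, 1 = held */
static volatile u_int example_ticket;

static void
example_lock_acquire(void)
{
	/* Retry until we atomically transition the word from 0 to 1. */
	while (atomic_cmpset_int(&example_lock, 0, 1) == 0)
		;	/* spin */
}

static u_int
example_next_ticket(void)
{
	/* xadd returns the pre-increment value, so tickets start at 0. */
	return (atomic_fetchadd_int(&example_ticket, 1));
}
#endif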
#if defined(_KERNEL) && !defined(SMP)

/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single processor
 * is always consistent, so we don't need any memory barriers.
 */
#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	return (*p);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	*p = v;						\
}							\
struct __hack

#else /* !(_KERNEL && !SMP) */

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "=m" (*p)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory");					\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
	: "=m" (*p),			/* 0 */		\
	  "+r" (v)			/* 1 */		\
	: "m" (*p));			/* 2 */		\
}							\
struct __hack

#endif /* _KERNEL && !SMP */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgq %0,%1",  "xchgq %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#ifndef WANT_FUNCTIONS

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_readandclear_int(volatile u_int *addr);
u_long	atomic_readandclear_long(volatile u_long *addr);

#endif /* __GNUCLIKE_ASM */
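/*
 * Illustrative sketch only (never compiled): a one-way handoff using the
 * acquire/release pairs generated above.  The release store makes the
 * data write visible before the flag; the acquire load guarantees that a
 * consumer seeing the flag also sees the data.  All names below are
 * hypothetical.
 */
#if 0
static u_int example_data;
static volatile u_int example_ready;

static void
example_publish(u_int value)
{
	example_data = value;
	/* Release: order the data write before the flag write. */
	atomic_store_rel_int(&example_ready, 1);
}

static int
example_consume(u_int *valuep)
{
	/* Acquire: order the flag read before the data read. */
	if (atomic_load_acq_int(&example_ready) == 0)
		return (0);
	*valuep = example_data;
	return (1);
}
#endif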
/* Acquire and release variants are identical to the normal ones. */
#define	atomic_set_acq_char		atomic_set_char
#define	atomic_set_rel_char		atomic_set_char
#define	atomic_clear_acq_char		atomic_clear_char
#define	atomic_clear_rel_char		atomic_clear_char
#define	atomic_add_acq_char		atomic_add_char
#define	atomic_add_rel_char		atomic_add_char
#define	atomic_subtract_acq_char	atomic_subtract_char
#define	atomic_subtract_rel_char	atomic_subtract_char

#define	atomic_set_acq_short		atomic_set_short
#define	atomic_set_rel_short		atomic_set_short
#define	atomic_clear_acq_short		atomic_clear_short
#define	atomic_clear_rel_short		atomic_clear_short
#define	atomic_add_acq_short		atomic_add_short
#define	atomic_add_rel_short		atomic_add_short
#define	atomic_subtract_acq_short	atomic_subtract_short
#define	atomic_subtract_rel_short	atomic_subtract_short

#define	atomic_set_acq_int		atomic_set_int
#define	atomic_set_rel_int		atomic_set_int
#define	atomic_clear_acq_int		atomic_clear_int
#define	atomic_clear_rel_int		atomic_clear_int
#define	atomic_add_acq_int		atomic_add_int
#define	atomic_add_rel_int		atomic_add_int
#define	atomic_subtract_acq_int		atomic_subtract_int
#define	atomic_subtract_rel_int		atomic_subtract_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_long
#define	atomic_set_rel_long		atomic_set_long
#define	atomic_clear_acq_long		atomic_clear_long
#define	atomic_clear_rel_long		atomic_clear_long
#define	atomic_add_acq_long		atomic_add_long
#define	atomic_add_rel_long		atomic_add_long
#define	atomic_subtract_acq_long	atomic_subtract_long
#define	atomic_subtract_rel_long	atomic_subtract_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_readandclear_64	atomic_readandclear_long

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_readandclear_ptr	atomic_readandclear_long

#endif /* !WANT_FUNCTIONS */
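/*
 * Illustrative sketch only (never compiled): since pointers are 64 bits
 * on amd64, the pointer aliases above map to the long operations, with
 * pointer values passed as u_long.  The structure and list head below
 * are hypothetical, and a real lock-free push would also have to deal
 * with the ABA problem; this only shows the calling convention.
 */
#if 0
struct example_node {
	struct example_node *next;
};

static volatile u_long example_head;	/* holds a struct example_node * */

static void
example_push(struct example_node *n)
{
	u_long old;

	/* Compare-and-swap the new node into the head of the list. */
	do {
		old = example_head;
		n->next = (struct example_node *)old;
	} while (atomic_cmpset_ptr(&example_head, old, (u_long)n) == 0);
}
#endif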
#endif /* !_MACHINE_ATOMIC_H_ */