/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#ifndef __OFFSETOF_MONITORBUF
/*
 * __OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define	__OFFSETOF_MONITORBUF	0x80

static __inline void
__mbk(void)
{

	__asm __volatile("lock; addl $0,%%fs:%0"
	    : "+m" (*(u_int *)__OFFSETOF_MONITORBUF) : : "memory", "cc");
}

static __inline void
__mbu(void)
{

	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif
/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src);
int	atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src);
int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src);
int	atomic_fcmpset_short(volatile u_short *dst, u_short *expect,
	    u_short src);
int	atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64(volatile uint64_t *);
void		atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif
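/*
 * Usage sketch for the plain operations listed in the table near the top
 * of this file.  Illustrative only; the variable and function names below
 * are hypothetical and not part of this header:
 *
 *	static volatile u_int	if_flags;
 *	static volatile u_int	rx_packets;
 *
 *	static void
 *	mark_running(void)
 *	{
 *		atomic_set_int(&if_flags, 0x1);		// *p |= v, atomically
 *	}
 *
 *	static void
 *	count_packet(void)
 *	{
 *		atomic_add_int(&rx_packets, 1);		// *p += v, atomically
 *	}
 *
 * The acquire/release variants (e.g. atomic_set_acq_int(),
 * atomic_add_rel_int()) additionally order surrounding memory accesses.
 */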
/*
 * The assembly is marked volatile to keep the compiler from removing the
 * code chunks.  GCC aggressively reorders operations, so a "memory"
 * clobber is necessary to prevent that for the memory barriers.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *	if (*dst == expect)
 *		*dst = src
 *
 * fcmpset:
 *	if (*dst == *expect)
 *		*dst = src
 *	else
 *		*expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
#define	ATOMIC_CMPSET(TYPE, CONS)			\
static __inline int					\
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg	%3,%1 ;		"		\
	"	sete	%0 ;		"		\
	"# atomic_cmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (expect)			/* 2 */		\
	: CONS (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}							\
							\
static __inline int					\
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg	%3,%1 ;		"		\
	"	sete	%0 ;		"		\
	"# atomic_fcmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (*expect)		/* 2 */		\
	: CONS (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}

ATOMIC_CMPSET(char, "q");
ATOMIC_CMPSET(short, "r");
ATOMIC_CMPSET(int, "r");

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}
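/*
 * Usage sketch for atomic_fcmpset_int(): a lock-free "store maximum"
 * loop.  Illustrative only; update_max() and the variable names are
 * hypothetical and not part of this header.  On failure, fcmpset
 * refreshes *expect with the current value of *dst, so the old value
 * does not have to be re-read by hand on every retry:
 *
 *	static void
 *	update_max(volatile u_int *max, u_int v)
 *	{
 *		u_int old;
 *
 *		old = *max;
 *		while (old < v) {
 *			if (atomic_fcmpset_int(max, &old, v))
 *				break;
 *		}
 *	}
 */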
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  In the kernel, we use a private per-cpu
 * cache line for "mem", to avoid introducing false data
 * dependencies.  In user space, we use the word at the top of the
 * stack.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */

#if defined(_KERNEL)
#if defined(SMP)
#define	__storeload_barrier()	__mbk()
#else /* _KERNEL && UP */
#define	__storeload_barrier()	__compiler_membar()
#endif /* SMP */
#else /* !_KERNEL */
#define	__storeload_barrier()	__mbu()
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}
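/*
 * Usage sketch for atomic_thread_fence_seq_cst(): the store/load
 * ("Dekker") pattern that the Store/Load barrier described above exists
 * for.  Illustrative only; the flag array and CPU indices are
 * hypothetical.  Without the seq_cst fence, the load below could pass
 * the preceding store and both CPUs could observe the other's flag as
 * still clear:
 *
 *	static volatile u_int want_in[2];
 *
 *	// On CPU i, with j the index of the other CPU:
 *	atomic_store_rel_int(&want_in[i], 1);
 *	atomic_thread_fence_seq_cst();		// Store/Load barrier
 *	if (atomic_load_acq_int(&want_in[j]) == 0) {
 *		// The other CPU has not announced itself yet.
 *	}
 */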
#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*q),			/* 1 */
	  "m" (*(q + 1))		/* 2 */
	: "memory");
	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*q),			/* 0 */
	  "=m" (*(q + 1))		/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%4,%2 ;		"
	"	movl	%3,%1 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchg8b %1 ;		"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");
	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
	return (v);
}

static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}
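/*
 * Usage sketch for the 64-bit operations above.  Illustrative only; the
 * function and variable names are hypothetical and not part of this
 * header.  On i386 a 64-bit counter cannot be updated with a single
 * plain store, so a cmpset loop is used:
 *
 *	static volatile uint64_t bytes_total;
 *
 *	static void
 *	account_bytes(uint64_t n)
 *	{
 *		uint64_t old;
 *
 *		do {
 *			old = atomic_load_acq_64(&bytes_total);
 *		} while (atomic_cmpset_64(&bytes_total, old, old + n) == 0);
 *	}
 */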
#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{

	return (atomic_testandclear_int((volatile u_int *)p, v));
}
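/*
 * Usage sketch for the u_long wrappers above.  Illustrative only; the
 * names below are hypothetical.  On i386, u_long and u_int are both
 * 32 bits wide, so the wrappers simply forward to the _int versions:
 *
 *	static volatile u_long ticket;
 *
 *	static u_long
 *	next_ticket(void)
 *	{
 *
 *		return (atomic_fetchadd_long(&ticket, 1));
 *	}
 */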
/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char
#define	atomic_cmpset_acq_char		atomic_cmpset_char
#define	atomic_cmpset_rel_char		atomic_cmpset_char
#define	atomic_fcmpset_acq_char		atomic_fcmpset_char
#define	atomic_fcmpset_rel_char		atomic_fcmpset_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short
#define	atomic_cmpset_acq_short		atomic_cmpset_short
#define	atomic_cmpset_rel_short		atomic_cmpset_short
#define	atomic_fcmpset_acq_short	atomic_fcmpset_short
#define	atomic_fcmpset_rel_short	atomic_fcmpset_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
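/*
 * Usage sketch for atomic_swap_int()/atomic_readandclear_int().
 * Illustrative only; pending_events and drain_events() are hypothetical.
 * The reader atomically takes ownership of all bits set so far and
 * resets the word to zero in a single operation:
 *
 *	static volatile u_int pending_events;
 *
 *	static void
 *	drain_events(void)
 *	{
 *		u_int ev;
 *
 *		ev = atomic_readandclear_int(&pending_events);
 *		// process the bits in ev
 *	}
 */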
/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char
#define	atomic_cmpset_8		atomic_cmpset_char
#define	atomic_cmpset_acq_8	atomic_cmpset_acq_char
#define	atomic_cmpset_rel_8	atomic_cmpset_rel_char
#define	atomic_fcmpset_8	atomic_fcmpset_char
#define	atomic_fcmpset_acq_8	atomic_fcmpset_acq_char
#define	atomic_fcmpset_rel_8	atomic_fcmpset_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short
#define	atomic_cmpset_16	atomic_cmpset_short
#define	atomic_cmpset_acq_16	atomic_cmpset_acq_short
#define	atomic_cmpset_rel_16	atomic_cmpset_rel_short
#define	atomic_fcmpset_16	atomic_fcmpset_short
#define	atomic_fcmpset_acq_16	atomic_fcmpset_acq_short
#define	atomic_fcmpset_rel_16	atomic_fcmpset_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_ptr(dst, old, new) \
	atomic_fcmpset_int((volatile u_int *)(dst), (u_int *)(old), (u_int)(new))
#define	atomic_fcmpset_acq_ptr(dst, old, new) \
	atomic_fcmpset_acq_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_rel_ptr(dst, old, new) \
	atomic_fcmpset_rel_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
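/*
 * Usage sketch for the pointer operations above: pushing onto a singly
 * linked, lock-free list head.  Illustrative only; struct node, head and
 * push() are hypothetical and not part of this header.  (A matching
 * lock-free pop needs additional care, e.g. against ABA.)
 *
 *	struct node {
 *		struct node	*next;
 *	};
 *
 *	static struct node *head;
 *
 *	static void
 *	push(struct node *n)
 *	{
 *
 *		do {
 *			n->next = head;
 *		} while (atomic_cmpset_rel_ptr((volatile uintptr_t *)&head,
 *		    (uintptr_t)n->next, (uintptr_t)n) == 0);
 *	}
 */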
#endif /* !WANT_FUNCTIONS */

#if defined(_KERNEL)
#define	mb()	__mbk()
#define	wmb()	__mbk()
#define	rmb()	__mbk()
#else
#define	mb()	__mbu()
#define	wmb()	__mbu()
#define	rmb()	__mbu()
#endif

#endif /* !_MACHINE_ATOMIC_H_ */