#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */

/*
 * Non-existent functions to indicate usage errors at link time
 * (or at compile time, if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
	__compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
	__compiletime_error("Bad argument size for add");

/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q	8
#else
#define __X86_CASE_Q	-1		/* sizeof will never return -1 */
#endif

/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value.
 */
#define __xchg_op(ptr, arg, op, lock)					\
	({								\
		__typeof__ (*(ptr)) __ret = (arg);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock #op "b %b0, %1\n"		\
				      : "+q" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock #op "w %w0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock #op "l %0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock #op "q %q0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		default:						\
			__ ## op ## _wrong_size();			\
		}							\
		__ret;							\
	})

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
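
/*
 * Illustrative sketch (not part of the original header): the classic
 * use of xchg() as a test-and-set primitive.  The lock word layout
 * (0 == free, 1 == held) and the function name are hypothetical and
 * exist only to show how the macro above is typically called.
 */
static inline void __example_tas_lock(unsigned long *lock)
{
	/* xchg() returns the previous value; loop until we saw "free". */
	while (xchg(lock, 1UL) != 0UL)
		; /* spin; a real lock would use cpu_relax() here */
}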

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile(lock "cmpxchgq %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif

#define cmpxchg(ptr, old, new)						\
	__cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
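
/*
 * Illustrative sketch (not part of the original header): the standard
 * cmpxchg() retry loop.  The function name and the "add unless equal
 * to u" semantics are hypothetical; the point is only to show that
 * success is detected by comparing the returned value with the
 * expected old value.
 */
static inline unsigned long __example_add_unless(unsigned long *v,
						 unsigned long a,
						 unsigned long u)
{
	unsigned long old, new;

	for (;;) {
		old = *v;	/* snapshot; a real user might want ACCESS_ONCE() */
		if (old == u)
			break;	/* refuse to modify the excluded value */
		new = old + a;
		if (cmpxchg(v, old, new) == old)
			break;	/* nobody raced with us; "new" is stored */
	}
	return old;
}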

/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 * xadd_sync() is always locked
 * xadd_local() is never locked
 */
#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")

#define __add(ptr, inc, lock)						\
	({								\
		__typeof__ (*(ptr)) __ret = (inc);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock "addb %b1, %0\n"		\
				      : "+m" (*(ptr)) : "qi" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock "addw %w1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock "addl %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock "addq %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		default:						\
			__add_wrong_size();				\
		}							\
		__ret;							\
	})

/*
 * add_*() adds "inc" to "*ptr"
 *
 * __add() takes a lock prefix
 * add_smp() is locked when multiple CPUs are online
 * add_sync() is always locked
 */
#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")

#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
({									\
	bool __ret;							\
	__typeof__(*(p1)) __old1 = (o1), __new1 = (n1);			\
	__typeof__(*(p2)) __old2 = (o2), __new2 = (n2);			\
	BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));			\
	BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));			\
	VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));		\
	VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));	\
	asm volatile(pfx "cmpxchg%c4b %2; sete %0"			\
		     : "=a" (__ret), "+d" (__old2),			\
		       "+m" (*(p1)), "+m" (*(p2))			\
		     : "i" (2 * sizeof(long)), "a" (__old1),		\
		       "b" (__new1), "c" (__new2));			\
	__ret;								\
})

#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
	__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
	__cmpxchg_double(, p1, p2, o1, o2, n1, n2)

#endif	/* ASM_X86_CMPXCHG_H */
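
/*
 * Usage notes (illustrative only, not part of the original interface;
 * the variable and function names below are hypothetical):
 *
 * xadd() is a fetch-and-add: it returns the value *ptr held before the
 * addition, so a ticket-style handout would look like
 *
 *	my_ticket = xadd(&next_ticket, 1UL);
 *
 * cmpxchg_double() compares and exchanges two adjacent machine words in
 * one instruction (cmpxchg8b on 32-bit, cmpxchg16b on 64-bit); p1 must
 * be aligned to twice the word size and p2 must immediately follow it,
 * and the boolean return value reports whether the pair was replaced:
 *
 *	if (cmpxchg_double(&w0, &w1, old0, old1, new0, new1))
 *		success();
 */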