#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds the address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32-bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg;	\
	lea per_cpu__##var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:per_cpu__##var
#else /* ! SMP */
#define PER_CPU(var, reg)						\
	__percpu_mov_op $per_cpu__##var, reg
#define PER_CPU_VAR(var)	per_cpu__##var
#endif	/* SMP */

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)	per_cpu__##var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset		percpu_read(this_cpu_off)
#else
#define __percpu_arg(x)		"%" #x
#endif

/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor must use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There must also be an entry in vmlinux_64.lds.S.
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(per_cpu_var(var)) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)	init_per_cpu__##var
#else
#define init_per_cpu_var(var)	per_cpu_var(var)
#endif
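
/*
 * Usage sketch (the per-cpu variable "foo_ptr" below is hypothetical
 * and shown only for illustration):
 *
 *	DEFINE_PER_CPU(unsigned long, foo_ptr);		(in some .c file)
 *	DECLARE_INIT_PER_CPU(foo_ptr);
 *
 *	unsigned long *boot_copy = &init_per_cpu_var(foo_ptr);
 *
 * With CONFIG_X86_64_SMP this refers to init_per_cpu__foo_ptr, which the
 * linker script must provide at the matching offset from __per_cpu_load;
 * otherwise it simply aliases per_cpu__foo_ptr.
 */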

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) T__;			\
	if (0) {					\
		T__ tmp__;				\
		tmp__ = (val);				\
	}						\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "qi" ((T__)(val)));		\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((T__)(val)));		\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((T__)(val)));		\
		break;					\
	case 8:						\
		asm(op "q %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "re" ((T__)(val)));		\
		break;					\
	default: __bad_percpu_size();			\
	}						\
} while (0)

#define percpu_from_op(op, var)				\
({							\
	typeof(var) ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(1)",%0"		\
		    : "=q" (ret__)			\
		    : "m" (var));			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(1)",%0"		\
		    : "=r" (ret__)			\
		    : "m" (var));			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(1)",%0"		\
		    : "=r" (ret__)			\
		    : "m" (var));			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(1)",%0"		\
		    : "=r" (ret__)			\
		    : "m" (var));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	ret__;						\
})

#define percpu_read(var)	percpu_from_op("mov", per_cpu__##var)
#define percpu_write(var, val)	percpu_to_op("mov", per_cpu__##var, val)
#define percpu_add(var, val)	percpu_to_op("add", per_cpu__##var, val)
#define percpu_sub(var, val)	percpu_to_op("sub", per_cpu__##var, val)
#define percpu_and(var, val)	percpu_to_op("and", per_cpu__##var, val)
#define percpu_or(var, val)	percpu_to_op("or", per_cpu__##var, val)
#define percpu_xor(var, val)	percpu_to_op("xor", per_cpu__##var, val)

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)				\
({									\
	int old__;							\
	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
		     : "=r" (old__), "+m" (per_cpu__##var)		\
		     : "dIr" (bit));					\
	old__;								\
})
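
/*
 * Usage sketch for the accessors above (the per-cpu variable
 * "foo_count" is hypothetical and shown only for illustration):
 *
 *	DEFINE_PER_CPU(unsigned long, foo_count);
 *
 *	percpu_write(foo_count, 0);
 *	percpu_add(foo_count, 1);
 *	nr = percpu_read(foo_count);
 *
 * Each accessor compiles to a single %__percpu_seg-prefixed instruction
 * (%gs: on 64-bit, %fs: on 32-bit) whose b/w/l/q suffix is selected by
 * operand size, so the read-modify-write always hits the executing
 * CPU's copy.  Like x86_test_and_clear_bit_percpu() above, these are
 * not atomic against other CPUs.
 */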

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

#ifdef CONFIG_NEED_MULTIPLE_NODES
void *pcpu_lpage_remapped(void *kaddr);
#else
static inline void *pcpu_lpage_remapped(void *kaddr)
{
	return NULL;
}
#endif

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))

#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */
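
/*
 * EARLY_PER_CPU usage sketch (the variable "foo_node" below is
 * hypothetical and shown only for illustration):
 *
 *	DEFINE_EARLY_PER_CPU(int, foo_node, -1);
 *	DECLARE_EARLY_PER_CPU(int, foo_node);
 *
 *	early_per_cpu(foo_node, cpu) = node;
 *
 * While foo_node_early_ptr is still set, the access goes to the
 * __initdata foo_node_early_map[] array; after the boot code has
 * copied the early values into the real per-cpu areas and cleared the
 * _early_ptr, the same expression falls through to
 * per_cpu(foo_node, cpu).
 */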