/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds an address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
	lea var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
#define PER_CPU_VAR(var)	var
#endif	/* SMP */

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)  var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset		this_cpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define arch_raw_cpu_ptr(ptr)				\
({							\
	unsigned long tcp_ptr__;			\
	asm volatile("add " __percpu_arg(1) ", %0"	\
		     : "=r" (tcp_ptr__)			\
		     : "m" (this_cpu_off), "0" (ptr));	\
	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
})
#else
#define __percpu_prefix		""
#endif

#define __percpu_arg(x)		__percpu_prefix "%" #x

/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor need to use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There also must be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)  init_per_cpu__##var
#else
#define init_per_cpu_var(var)  var
#endif

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) pto_T__;			\
	if (0) {					\
		pto_T__ pto_tmp__;			\
		pto_tmp__ = (val);			\
		(void)pto_tmp__;			\
	}						\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "qi" ((pto_T__)(val)));		\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 8:						\
		asm(op "q %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "re" ((pto_T__)(val)));		\
		break;					\
	default: __bad_percpu_size();			\
	}						\
} while (0)
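/*
 * For illustration: on an x86-64 SMP build, percpu_to_op("mov", var, 1)
 * on a 4-byte variable compiles down to a single segment-relative store,
 * roughly
 *
 *	movl $1, %gs:var
 *
 * (the exact operands depend on the compiler's choices).  Because the
 * whole update is one instruction, it cannot be torn by an interrupt or
 * by preemption on the local CPU.
 */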
/*
 * Generate a percpu add to memory instruction, optimizing the code to
 * use inc/dec when a constant 1 or -1 is added.
 */
#define percpu_add_op(var, val)						\
do {									\
	typedef typeof(var) pao_T__;					\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ?		\
				(int)(val) : 0;				\
	if (0) {							\
		pao_T__ pao_tmp__;					\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	switch (sizeof(var)) {						\
	case 1:								\
		if (pao_ID__ == 1)					\
			asm("incb "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decb "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addb %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "qi" ((pao_T__)(val)));			\
		break;							\
	case 2:								\
		if (pao_ID__ == 1)					\
			asm("incw "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decw "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addw %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 4:								\
		if (pao_ID__ == 1)					\
			asm("incl "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decl "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addl %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 8:								\
		if (pao_ID__ == 1)					\
			asm("incq "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decq "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addq %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "re" ((pao_T__)(val)));			\
		break;							\
	default: __bad_percpu_size();					\
	}								\
} while (0)
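/*
 * For illustration: with a constant increment of 1, percpu_add_op() on
 * an 8-byte variable emits roughly "incq %gs:var" instead of
 * "addq $1, %gs:var"; any other value falls back to the add form.  The
 * exact output depends on configuration and compiler choices.
 */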
#define percpu_from_op(op, var)				\
({							\
	typeof(var) pfo_ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm volatile(op "b "__percpu_arg(1)",%0"\
		    : "=q" (pfo_ret__)			\
		    : "m" (var));			\
		break;					\
	case 2:						\
		asm volatile(op "w "__percpu_arg(1)",%0"\
		    : "=r" (pfo_ret__)			\
		    : "m" (var));			\
		break;					\
	case 4:						\
		asm volatile(op "l "__percpu_arg(1)",%0"\
		    : "=r" (pfo_ret__)			\
		    : "m" (var));			\
		break;					\
	case 8:						\
		asm volatile(op "q "__percpu_arg(1)",%0"\
		    : "=r" (pfo_ret__)			\
		    : "m" (var));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	pfo_ret__;					\
})

#define percpu_stable_op(op, var)			\
({							\
	typeof(var) pfo_ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(P1)",%0"	\
		    : "=q" (pfo_ret__)			\
		    : "p" (&(var)));			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(P1)",%0"	\
		    : "=r" (pfo_ret__)			\
		    : "p" (&(var)));			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(P1)",%0"	\
		    : "=r" (pfo_ret__)			\
		    : "p" (&(var)));			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(P1)",%0"	\
		    : "=r" (pfo_ret__)			\
		    : "p" (&(var)));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	pfo_ret__;					\
})

#define percpu_unary_op(op, var)			\
({							\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
})

/*
 * Add return operation
 */
#define percpu_add_return_op(var, val)			\
({							\
	typeof(var) paro_ret__ = val;			\
	switch (sizeof(var)) {				\
	case 1:						\
		asm("xaddb %0, "__percpu_arg(1)		\
		    : "+q" (paro_ret__), "+m" (var)	\
		    : : "memory");			\
		break;					\
	case 2:						\
		asm("xaddw %0, "__percpu_arg(1)		\
		    : "+r" (paro_ret__), "+m" (var)	\
		    : : "memory");			\
		break;					\
	case 4:						\
		asm("xaddl %0, "__percpu_arg(1)		\
		    : "+r" (paro_ret__), "+m" (var)	\
		    : : "memory");			\
		break;					\
	case 8:						\
		asm("xaddq %0, "__percpu_arg(1)		\
		    : "+re" (paro_ret__), "+m" (var)	\
		    : : "memory");			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	paro_ret__ += val;				\
	paro_ret__;					\
})
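/*
 * For clarity: xadd leaves the old value of the percpu variable in the
 * register operand and stores old + val back to memory.  Adding val to
 * the register afterwards therefore yields the new value, which is what
 * the *_add_return() operations are expected to return.
 */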
/*
 * xchg is implemented using cmpxchg without a lock prefix.  xchg is
 * expensive due to the implied lock prefix: the processor cannot
 * prefetch cachelines if xchg is used.
 */
#define percpu_xchg_op(var, nval)					\
({									\
	typeof(var) pxo_ret__;						\
	typeof(var) pxo_new__ = (nval);					\
	switch (sizeof(var)) {						\
	case 1:								\
		asm("\n\tmov "__percpu_arg(1)",%%al"			\
		    "\n1:\tcmpxchgb %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
		    : "=&a" (pxo_ret__), "+m" (var)			\
		    : "q" (pxo_new__)					\
		    : "memory");					\
		break;							\
	case 2:								\
		asm("\n\tmov "__percpu_arg(1)",%%ax"			\
		    "\n1:\tcmpxchgw %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
		    : "=&a" (pxo_ret__), "+m" (var)			\
		    : "r" (pxo_new__)					\
		    : "memory");					\
		break;							\
	case 4:								\
		asm("\n\tmov "__percpu_arg(1)",%%eax"			\
		    "\n1:\tcmpxchgl %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
		    : "=&a" (pxo_ret__), "+m" (var)			\
		    : "r" (pxo_new__)					\
		    : "memory");					\
		break;							\
	case 8:								\
		asm("\n\tmov "__percpu_arg(1)",%%rax"			\
		    "\n1:\tcmpxchgq %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
		    : "=&a" (pxo_ret__), "+m" (var)			\
		    : "r" (pxo_new__)					\
		    : "memory");					\
		break;							\
	default: __bad_percpu_size();					\
	}								\
	pxo_ret__;							\
})

/*
 * cmpxchg has no such implied lock semantics; as a result it is much
 * more efficient for cpu local operations.
 */
#define percpu_cmpxchg_op(var, oval, nval)				\
({									\
	typeof(var) pco_ret__;						\
	typeof(var) pco_old__ = (oval);					\
	typeof(var) pco_new__ = (nval);					\
	switch (sizeof(var)) {						\
	case 1:								\
		asm("cmpxchgb %2, "__percpu_arg(1)			\
		    : "=a" (pco_ret__), "+m" (var)			\
		    : "q" (pco_new__), "0" (pco_old__)			\
		    : "memory");					\
		break;							\
	case 2:								\
		asm("cmpxchgw %2, "__percpu_arg(1)			\
		    : "=a" (pco_ret__), "+m" (var)			\
		    : "r" (pco_new__), "0" (pco_old__)			\
		    : "memory");					\
		break;							\
	case 4:								\
		asm("cmpxchgl %2, "__percpu_arg(1)			\
		    : "=a" (pco_ret__), "+m" (var)			\
		    : "r" (pco_new__), "0" (pco_old__)			\
		    : "memory");					\
		break;							\
	case 8:								\
		asm("cmpxchgq %2, "__percpu_arg(1)			\
		    : "=a" (pco_ret__), "+m" (var)			\
		    : "r" (pco_new__), "0" (pco_old__)			\
		    : "memory");					\
		break;							\
	default: __bad_percpu_size();					\
	}								\
	pco_ret__;							\
})
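/*
 * Note (a general x86 property, stated here for clarity): without a lock
 * prefix these sequences are not atomic with respect to other CPUs, so
 * they must only be used on data that other CPUs do not write.  Against
 * interrupts and preemption on the local CPU they remain safe: a plain
 * cmpxchg completes as a single instruction, and the percpu_xchg_op()
 * loop above simply retries if the variable changed underneath it.
 */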
/*
 * this_cpu_read() makes gcc load the percpu variable every time it is
 * accessed while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info(), both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define this_cpu_read_stable(var)	percpu_stable_op("mov", var)

#define raw_cpu_read_1(pcp)		percpu_from_op("mov", pcp)
#define raw_cpu_read_2(pcp)		percpu_from_op("mov", pcp)
#define raw_cpu_read_4(pcp)		percpu_from_op("mov", pcp)

#define raw_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_add_1(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_add_2(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_add_4(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_and_1(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_and_2(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_and_4(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_xchg_1(pcp, val)	percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val)	percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val)	percpu_xchg_op(pcp, val)

#define this_cpu_read_1(pcp)		percpu_from_op("mov", pcp)
#define this_cpu_read_2(pcp)		percpu_from_op("mov", pcp)
#define this_cpu_read_4(pcp)		percpu_from_op("mov", pcp)
#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)

#define raw_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#ifdef CONFIG_X86_CMPXCHG64
#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2)		\
({									\
	bool __ret;							\
	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
	asm volatile("cmpxchg8b "__percpu_arg(1)			\
		     CC_SET(z)						\
		     : CC_OUT(z) (__ret), "+m" (pcp1), "+m" (pcp2), "+a" (__o1), "+d" (__o2) \
		     : "b" (__n1), "c" (__n2));				\
	__ret;								\
})

#define raw_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
#define this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
#endif /* CONFIG_X86_CMPXCHG64 */

/*
 * Per cpu atomic 64 bit operations are only available under 64 bit.
 * 32 bit must fall back to generic operations.
 */
#ifdef CONFIG_X86_64
#define raw_cpu_read_8(pcp)			percpu_from_op("mov", pcp)
#define raw_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
#define raw_cpu_add_8(pcp, val)			percpu_add_op((pcp), val)
#define raw_cpu_and_8(pcp, val)			percpu_to_op("and", (pcp), val)
#define raw_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_read_8(pcp)			percpu_from_op("mov", pcp)
#define this_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
#define this_cpu_add_8(pcp, val)		percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val)		percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
#define this_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

/*
 * Pretty complex macro to generate the cmpxchg16b instruction.  The
 * instruction is not supported on early AMD64 processors, so we must be
 * able to emulate it in software.  The address used in the cmpxchg16b
 * instruction must be aligned to a 16 byte boundary.
 */
#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2)		\
({									\
	bool __ret;							\
	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
	alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
		       "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t",	\
		       X86_FEATURE_CX16,				\
		       ASM_OUTPUT2("=a" (__ret), "+m" (pcp1),		\
				   "+m" (pcp2), "+d" (__o2)),		\
		       "b" (__n1), "c" (__n2), "a" (__o1) : "rsi");	\
	__ret;								\
})

#define raw_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
#define this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double

#endif

static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
			const unsigned long __percpu *addr)
{
	unsigned long __percpu *a =
		(unsigned long __percpu *)addr + nr / BITS_PER_LONG;

#ifdef CONFIG_X86_64
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
#else
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
#endif
}

static inline bool x86_this_cpu_variable_test_bit(int nr,
			const unsigned long __percpu *addr)
{
	bool oldbit;

	asm volatile("btl "__percpu_arg(2)",%1"
			CC_SET(c)
			: CC_OUT(c) (oldbit)
			: "m" (*(unsigned long __percpu *)addr), "Ir" (nr));

	return oldbit;
}

#define x86_this_cpu_test_bit(nr, addr)			\
	(__builtin_constant_p((nr))			\
	 ? x86_this_cpu_constant_test_bit((nr), (addr))	\
	 : x86_this_cpu_variable_test_bit((nr), (addr)))
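/*
 * Usage sketch (my_percpu_flags is a hypothetical variable, shown only
 * for illustration):
 *
 *	DECLARE_PER_CPU(unsigned long, my_percpu_flags);
 *	...
 *	if (x86_this_cpu_test_bit(3, &my_percpu_flags))
 *		...;
 *
 * A compile-time constant bit number is handled by the C helper above,
 * a run-time bit number by the single btl instruction.
 */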
#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type)  _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);		\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type)  _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))

#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */