/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
# define __percpu_seg		gs
# define __percpu_rel		(%rip)
#else
# define __percpu_seg		fs
# define __percpu_rel
#endif

#ifdef __ASSEMBLY__

#ifdef CONFIG_SMP
# define __percpu		%__percpu_seg:
#else
# define __percpu
#endif

#define PER_CPU_VAR(var)	__percpu(var)__percpu_rel

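/*
 * Illustrative sketch (hypothetical variable name): assembly code
 * addresses a per-CPU variable as, e.g.,
 *
 *	incl	PER_CPU_VAR(foo_count)
 *
 * which on an x86-64 SMP build expands to "incl %gs:foo_count(%rip)",
 * and on UP simply to "incl foo_count(%rip)".
 */
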
#ifdef CONFIG_X86_64_SMP
# define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
#else
# define INIT_PER_CPU_VAR(var)  var
#endif

#else /* !__ASSEMBLY__: */

#include <linux/build_bug.h>
#include <linux/stringify.h>
#include <asm/asm.h>

#ifdef CONFIG_SMP

#ifdef CONFIG_CC_HAS_NAMED_AS

#ifdef __CHECKER__
# define __seg_gs		__attribute__((address_space(__seg_gs)))
# define __seg_fs		__attribute__((address_space(__seg_fs)))
#endif

#ifdef CONFIG_X86_64
# define __percpu_seg_override	__seg_gs
#else
# define __percpu_seg_override	__seg_fs
#endif

#define __percpu_prefix		""

#else /* !CONFIG_CC_HAS_NAMED_AS: */

#define __percpu_seg_override
#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"

#endif /* CONFIG_CC_HAS_NAMED_AS */

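/*
 * A minimal sketch of what named address spaces buy us, assuming a
 * compiler with x86 __seg_gs/__seg_fs support: a plain C dereference
 * of a __seg_gs-qualified pointer is emitted as a %gs-relative MOV,
 * with no inline-asm segment-prefix string needed:
 *
 *	unsigned int __seg_gs *p = (unsigned int __seg_gs *)off;
 *	unsigned int v = *p;	// emits: movl %gs:(%reg), %eax
 */
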
#define __force_percpu_prefix	"%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset		this_cpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 *
 * arch_raw_cpu_ptr should not be used in 32-bit VDSO for a 64-bit
 * kernel, because games are played with CONFIG_X86_64 there and
 * sizeof(this_cpu_off) becomes 4.
 */
#ifndef BUILD_VDSO32_64
#define arch_raw_cpu_ptr(_ptr)						\
({									\
	unsigned long tcp_ptr__ = raw_cpu_read_long(this_cpu_off);	\
									\
	tcp_ptr__ += (__force unsigned long)(_ptr);			\
	(typeof(*(_ptr)) __kernel __force *)tcp_ptr__;			\
})
#else
#define arch_raw_cpu_ptr(_ptr) ({ BUILD_BUG(); (typeof(_ptr))0; })
#endif

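/*
 * Usage sketch (illustrative, hypothetical per-CPU variable):
 * arch_raw_cpu_ptr() is what raw_cpu_ptr()/this_cpu_ptr() boil down
 * to, turning a per-CPU address into a pointer valid on the local CPU:
 *
 *	DEFINE_PER_CPU(struct foo, foo_state);
 *	struct foo *f = this_cpu_ptr(&foo_state);
 *
 * i.e. a single add of this_cpu_off to the address, with no extra
 * temporary register clobbered.
 */
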
#define PER_CPU_VAR(var)	%__percpu_seg:(var)__percpu_rel

#else /* !CONFIG_SMP: */

#define __percpu_seg_override
#define __percpu_prefix		""
#define __force_percpu_prefix	""

#define PER_CPU_VAR(var)	(var)__percpu_rel

#endif /* CONFIG_SMP */

#define __my_cpu_type(var)	typeof(var) __percpu_seg_override
#define __my_cpu_ptr(ptr)	(__my_cpu_type(*(ptr))*)(__force uintptr_t)(ptr)
#define __my_cpu_var(var)	(*__my_cpu_ptr(&(var)))
#define __percpu_arg(x)		__percpu_prefix "%" #x
#define __force_percpu_arg(x)	__force_percpu_prefix "%" #x

/*
 * Statically initialized pointers to per-CPU variables of the boot
 * processor must use these macros to get the proper address offset
 * from __per_cpu_load on SMP.
 *
 * A matching entry must also be present in vmlinux_64.lds.S.
 */
#define DECLARE_INIT_PER_CPU(var) \
       extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
# define init_per_cpu_var(var)  init_per_cpu__##var
#else
# define init_per_cpu_var(var)  var
#endif

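/*
 * Usage sketch (gdt_page is believed to be one such user in current
 * kernels): after
 *
 *	DECLARE_INIT_PER_CPU(gdt_page);
 *
 * early boot code can take &init_per_cpu_var(gdt_page), which resolves
 * to init_per_cpu__gdt_page inside the initial per-CPU area.
 */
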
/*
 * For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though).
 */

#define __pcpu_type_1		u8
#define __pcpu_type_2		u16
#define __pcpu_type_4		u32
#define __pcpu_type_8		u64

#define __pcpu_cast_1(val)	((u8)(((unsigned long) val) & 0xff))
#define __pcpu_cast_2(val)	((u16)(((unsigned long) val) & 0xffff))
#define __pcpu_cast_4(val)	((u32)(((unsigned long) val) & 0xffffffff))
#define __pcpu_cast_8(val)	((u64)(val))

#define __pcpu_op1_1(op, dst)	op "b " dst
#define __pcpu_op1_2(op, dst)	op "w " dst
#define __pcpu_op1_4(op, dst)	op "l " dst
#define __pcpu_op1_8(op, dst)	op "q " dst

#define __pcpu_op2_1(op, src, dst) op "b " src ", " dst
#define __pcpu_op2_2(op, src, dst) op "w " src ", " dst
#define __pcpu_op2_4(op, src, dst) op "l " src ", " dst
#define __pcpu_op2_8(op, src, dst) op "q " src ", " dst

#define __pcpu_reg_1(mod, x)	mod "q" (x)
#define __pcpu_reg_2(mod, x)	mod "r" (x)
#define __pcpu_reg_4(mod, x)	mod "r" (x)
#define __pcpu_reg_8(mod, x)	mod "r" (x)

#define __pcpu_reg_imm_1(x)	"qi" (x)
#define __pcpu_reg_imm_2(x)	"ri" (x)
#define __pcpu_reg_imm_4(x)	"ri" (x)
#define __pcpu_reg_imm_8(x)	"re" (x)

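/*
 * Illustrative expansion: with a 4-byte operand and no compiler
 * named-AS support, __pcpu_op2_4("mov", "%[val]", __percpu_arg([var]))
 * pastes together the asm template
 *
 *	"movl %[val], %%gs:%[var]"
 *
 * while __pcpu_reg_4("=", pfo_val__) supplies the "=r" (pfo_val__)
 * output constraint.
 */
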
#ifdef CONFIG_USE_X86_SEG_SUPPORT

#define __raw_cpu_read(size, qual, pcp)					\
({									\
	*(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp));		\
})

#define __raw_cpu_write(size, qual, pcp, val)				\
do {									\
	*(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)) = (val);	\
} while (0)

#define __raw_cpu_read_const(pcp)	__raw_cpu_read(, , pcp)

#else /* !CONFIG_USE_X86_SEG_SUPPORT: */

#define __raw_cpu_read(size, qual, _var)				\
({									\
	__pcpu_type_##size pfo_val__;					\
									\
	asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]), "%[val]") \
	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
	    : [var] "m" (__my_cpu_var(_var)));				\
									\
	(typeof(_var))(unsigned long) pfo_val__;			\
})

#define __raw_cpu_write(size, qual, _var, _val)				\
do {									\
	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val);	\
									\
	/* Let the compiler type-check _val against _var: */		\
	if (0) {							\
		typeof(_var) pto_tmp__;					\
		pto_tmp__ = (_val);					\
		(void)pto_tmp__;					\
	}								\
	asm qual(__pcpu_op2_##size("mov", "%[val]", __percpu_arg([var])) \
	    : [var] "=m" (__my_cpu_var(_var))				\
	    : [val] __pcpu_reg_imm_##size(pto_val__));			\
} while (0)

/*
 * The generic per-CPU infrastructure is not suitable for
 * reading const-qualified variables.
 */
#define __raw_cpu_read_const(pcp)	({ BUILD_BUG(); (typeof(pcp))0; })

#endif /* CONFIG_USE_X86_SEG_SUPPORT */

#define __raw_cpu_read_stable(size, _var)				\
({									\
	__pcpu_type_##size pfo_val__;					\
									\
	asm(__pcpu_op2_##size("mov", __force_percpu_arg(a[var]), "%[val]") \
	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
	    : [var] "i" (&(_var)));					\
									\
	(typeof(_var))(unsigned long) pfo_val__;			\
})

#define percpu_unary_op(size, qual, op, _var)				\
({									\
	asm qual (__pcpu_op1_##size(op, __percpu_arg([var]))		\
	    : [var] "+m" (__my_cpu_var(_var)));				\
})

#define percpu_binary_op(size, qual, op, _var, _val)			\
do {									\
	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val);	\
									\
	if (0) {							\
		typeof(_var) pto_tmp__;					\
		pto_tmp__ = (_val);					\
		(void)pto_tmp__;					\
	}								\
	asm qual(__pcpu_op2_##size(op, "%[val]", __percpu_arg([var]))	\
	    : [var] "+m" (__my_cpu_var(_var))				\
	    : [val] __pcpu_reg_imm_##size(pto_val__));			\
} while (0)

/*
 * Generate a per-CPU add-to-memory instruction, optimizing the code
 * to use INC/DEC when the constant 1 or -1 is added.
 */
#define percpu_add_op(size, qual, var, val)				\
do {									\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ?		\
				(int)(val) : 0;				\
									\
	if (0) {							\
		typeof(var) pao_tmp__;					\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	if (pao_ID__ == 1)						\
		percpu_unary_op(size, qual, "inc", var);		\
	else if (pao_ID__ == -1)					\
		percpu_unary_op(size, qual, "dec", var);		\
	else								\
		percpu_binary_op(size, qual, "add", var, val);		\
} while (0)

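/*
 * Illustrative effect (hypothetical variable name): the generic
 * this_cpu_add() family routes here, so
 *
 *	this_cpu_add(foo_count, 1);	// emits: incl %gs:foo_count(%rip)
 *	this_cpu_add(foo_count, 7);	// emits: addl $7, %gs:foo_count(%rip)
 */
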
/*
 * Add-and-return operation, implemented with XADD.
 */
#define percpu_add_return_op(size, qual, _var, _val)			\
({									\
	__pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val);	\
									\
	asm qual (__pcpu_op2_##size("xadd", "%[tmp]",			\
				     __percpu_arg([var]))		\
		  : [tmp] __pcpu_reg_##size("+", paro_tmp__),		\
		    [var] "+m" (__my_cpu_var(_var))			\
		  : : "memory");					\
	(typeof(_var))(unsigned long) (paro_tmp__ + _val);		\
})

/*
 * raw_cpu_xchg() can use a load-store since
 * it is not required to be IRQ-safe.
 */
#define raw_percpu_xchg_op(_var, _nval)					\
({									\
	typeof(_var) pxo_old__ = raw_cpu_read(_var);			\
									\
	raw_cpu_write(_var, _nval);					\
									\
	pxo_old__;							\
})

/*
 * this_cpu_xchg() is implemented using CMPXCHG without a LOCK prefix.
 * XCHG is expensive due to the implied LOCK prefix. The processor
 * cannot prefetch cachelines if XCHG is used.
 */
#define this_percpu_xchg_op(_var, _nval)				\
({									\
	typeof(_var) pxo_old__ = this_cpu_read(_var);			\
									\
	do { } while (!this_cpu_try_cmpxchg(_var, &pxo_old__, _nval));	\
									\
	pxo_old__;							\
})

/*
 * CMPXCHG has no such implied LOCK semantics; as a result it is much
 * more efficient for CPU-local operations.
 */
#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval)		\
({									\
	__pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval);	\
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval);	\
									\
	asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]",		\
				    __percpu_arg([var]))		\
		  : [oval] "+a" (pco_old__),				\
		    [var] "+m" (__my_cpu_var(_var))			\
		  : [nval] __pcpu_reg_##size(, pco_new__)		\
		  : "memory");						\
									\
	(typeof(_var))(unsigned long) pco_old__;			\
})

#define percpu_try_cmpxchg_op(size, qual, _var, _ovalp, _nval)		\
({									\
	bool success;							\
	__pcpu_type_##size *pco_oval__ = (__pcpu_type_##size *)(_ovalp); \
	__pcpu_type_##size pco_old__ = *pco_oval__;			\
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval);	\
									\
	asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]",		\
				    __percpu_arg([var]))		\
		  CC_SET(z)						\
		  : CC_OUT(z) (success),				\
		    [oval] "+a" (pco_old__),				\
		    [var] "+m" (__my_cpu_var(_var))			\
		  : [nval] __pcpu_reg_##size(, pco_new__)		\
		  : "memory");						\
	if (unlikely(!success))						\
		*pco_oval__ = pco_old__;				\
									\
	likely(success);						\
})

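/*
 * Usage sketch (illustrative, hypothetical variable): the try_cmpxchg
 * form returns a bool and refreshes the expected value on failure, so
 * the canonical update loop needs no explicit re-read:
 *
 *	unsigned int old = this_cpu_read(foo_state);
 *
 *	do {
 *		new = compute_next(old);
 *	} while (!this_cpu_try_cmpxchg(foo_state, &old, new));
 */
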
#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)

#define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval)		\
({									\
	union {								\
		u64 var;						\
		struct {						\
			u32 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = _oval;						\
	new__.var = _nval;						\
									\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
			      "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
		  : [var] "+m" (__my_cpu_var(_var)),			\
		    "+a" (old__.low),					\
		    "+d" (old__.high)					\
		  : "b" (new__.low),					\
		    "c" (new__.high),					\
		    "S" (&(_var))					\
		  : "memory");						\
									\
	old__.var;							\
})

#define raw_cpu_cmpxchg64(pcp, oval, nval)		percpu_cmpxchg64_op(8,         , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval)		percpu_cmpxchg64_op(8, volatile, pcp, oval, nval)

#define percpu_try_cmpxchg64_op(size, qual, _var, _ovalp, _nval)	\
({									\
	bool success;							\
	u64 *_oval = (u64 *)(_ovalp);					\
	union {								\
		u64 var;						\
		struct {						\
			u32 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = *_oval;						\
	new__.var = _nval;						\
									\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
			      "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
		  CC_SET(z)						\
		  : CC_OUT(z) (success),				\
		    [var] "+m" (__my_cpu_var(_var)),			\
		    "+a" (old__.low),					\
		    "+d" (old__.high)					\
		  : "b" (new__.low),					\
		    "c" (new__.high),					\
		    "S" (&(_var))					\
		  : "memory");						\
	if (unlikely(!success))						\
		*_oval = old__.var;					\
									\
	likely(success);						\
})

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval)		percpu_try_cmpxchg64_op(8,         , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval)	percpu_try_cmpxchg64_op(8, volatile, pcp, ovalp, nval)

#endif /* defined(CONFIG_X86_32) && !defined(CONFIG_UML) */

#ifdef CONFIG_X86_64
#define raw_cpu_cmpxchg64(pcp, oval, nval)		percpu_cmpxchg_op(8,         , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval)		percpu_cmpxchg_op(8, volatile, pcp, oval, nval)

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval)		percpu_try_cmpxchg_op(8,         , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval)	percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)

#define percpu_cmpxchg128_op(size, qual, _var, _oval, _nval)		\
({									\
	union {								\
		u128 var;						\
		struct {						\
			u64 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = _oval;						\
	new__.var = _nval;						\
									\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
			      "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
		  : [var] "+m" (__my_cpu_var(_var)),			\
		    "+a" (old__.low),					\
		    "+d" (old__.high)					\
		  : "b" (new__.low),					\
		    "c" (new__.high),					\
		    "S" (&(_var))					\
		  : "memory");						\
									\
	old__.var;							\
})

#define raw_cpu_cmpxchg128(pcp, oval, nval)		percpu_cmpxchg128_op(16,         , pcp, oval, nval)
#define this_cpu_cmpxchg128(pcp, oval, nval)		percpu_cmpxchg128_op(16, volatile, pcp, oval, nval)

#define percpu_try_cmpxchg128_op(size, qual, _var, _ovalp, _nval)	\
({									\
	bool success;							\
	u128 *_oval = (u128 *)(_ovalp);					\
	union {								\
		u128 var;						\
		struct {						\
			u64 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = *_oval;						\
	new__.var = _nval;						\
									\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
			      "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
		  CC_SET(z)						\
		  : CC_OUT(z) (success),				\
		    [var] "+m" (__my_cpu_var(_var)),			\
		    "+a" (old__.low),					\
		    "+d" (old__.high)					\
		  : "b" (new__.low),					\
		    "c" (new__.high),					\
		    "S" (&(_var))					\
		  : "memory");						\
	if (unlikely(!success))						\
		*_oval = old__.var;					\
	likely(success);						\
})

#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval)	percpu_try_cmpxchg128_op(16,         , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg128(pcp, ovalp, nval)	percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval)

#endif /* CONFIG_X86_64 */

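/*
 * Usage sketch (illustrative, hypothetical per-CPU u128 variable;
 * 64-bit kernels only):
 *
 *	if (!this_cpu_try_cmpxchg128(foo_pair, &old, new))
 *		...;	// "old" now holds the value actually observed
 *
 * ALTERNATIVE patches in a call to this_cpu_cmpxchg16b_emu on CPUs
 * without X86_FEATURE_CX16.
 */
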
#define raw_cpu_read_1(pcp)				__raw_cpu_read(1, , pcp)
#define raw_cpu_read_2(pcp)				__raw_cpu_read(2, , pcp)
#define raw_cpu_read_4(pcp)				__raw_cpu_read(4, , pcp)
#define raw_cpu_write_1(pcp, val)			__raw_cpu_write(1, , pcp, val)
#define raw_cpu_write_2(pcp, val)			__raw_cpu_write(2, , pcp, val)
#define raw_cpu_write_4(pcp, val)			__raw_cpu_write(4, , pcp, val)

#define this_cpu_read_1(pcp)				__raw_cpu_read(1, volatile, pcp)
#define this_cpu_read_2(pcp)				__raw_cpu_read(2, volatile, pcp)
#define this_cpu_read_4(pcp)				__raw_cpu_read(4, volatile, pcp)
#define this_cpu_write_1(pcp, val)			__raw_cpu_write(1, volatile, pcp, val)
#define this_cpu_write_2(pcp, val)			__raw_cpu_write(2, volatile, pcp, val)
#define this_cpu_write_4(pcp, val)			__raw_cpu_write(4, volatile, pcp, val)

#define this_cpu_read_stable_1(pcp)			__raw_cpu_read_stable(1, pcp)
#define this_cpu_read_stable_2(pcp)			__raw_cpu_read_stable(2, pcp)
#define this_cpu_read_stable_4(pcp)			__raw_cpu_read_stable(4, pcp)

#define raw_cpu_add_1(pcp, val)				percpu_add_op(1, , (pcp), val)
#define raw_cpu_add_2(pcp, val)				percpu_add_op(2, , (pcp), val)
#define raw_cpu_add_4(pcp, val)				percpu_add_op(4, , (pcp), val)
#define raw_cpu_and_1(pcp, val)				percpu_binary_op(1, , "and", (pcp), val)
#define raw_cpu_and_2(pcp, val)				percpu_binary_op(2, , "and", (pcp), val)
#define raw_cpu_and_4(pcp, val)				percpu_binary_op(4, , "and", (pcp), val)
#define raw_cpu_or_1(pcp, val)				percpu_binary_op(1, , "or", (pcp), val)
#define raw_cpu_or_2(pcp, val)				percpu_binary_op(2, , "or", (pcp), val)
#define raw_cpu_or_4(pcp, val)				percpu_binary_op(4, , "or", (pcp), val)
#define raw_cpu_xchg_1(pcp, val)			raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val)			raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val)			raw_percpu_xchg_op(pcp, val)

#define this_cpu_add_1(pcp, val)			percpu_add_op(1, volatile, (pcp), val)
#define this_cpu_add_2(pcp, val)			percpu_add_op(2, volatile, (pcp), val)
#define this_cpu_add_4(pcp, val)			percpu_add_op(4, volatile, (pcp), val)
#define this_cpu_and_1(pcp, val)			percpu_binary_op(1, volatile, "and", (pcp), val)
#define this_cpu_and_2(pcp, val)			percpu_binary_op(2, volatile, "and", (pcp), val)
#define this_cpu_and_4(pcp, val)			percpu_binary_op(4, volatile, "and", (pcp), val)
#define this_cpu_or_1(pcp, val)				percpu_binary_op(1, volatile, "or", (pcp), val)
#define this_cpu_or_2(pcp, val)				percpu_binary_op(2, volatile, "or", (pcp), val)
#define this_cpu_or_4(pcp, val)				percpu_binary_op(4, volatile, "or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)			this_percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval)			this_percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval)			this_percpu_xchg_op(pcp, nval)

#define raw_cpu_add_return_1(pcp, val)			percpu_add_return_op(1, , pcp, val)
#define raw_cpu_add_return_2(pcp, val)			percpu_add_return_op(2, , pcp, val)
#define raw_cpu_add_return_4(pcp, val)			percpu_add_return_op(4, , pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval)		percpu_cmpxchg_op(1, , pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval)		percpu_cmpxchg_op(2, , pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval)		percpu_cmpxchg_op(4, , pcp, oval, nval)
#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval)		percpu_try_cmpxchg_op(1, , pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval)		percpu_try_cmpxchg_op(2, , pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval)		percpu_try_cmpxchg_op(4, , pcp, ovalp, nval)

#define this_cpu_add_return_1(pcp, val)			percpu_add_return_op(1, volatile, pcp, val)
#define this_cpu_add_return_2(pcp, val)			percpu_add_return_op(2, volatile, pcp, val)
#define this_cpu_add_return_4(pcp, val)			percpu_add_return_op(4, volatile, pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)		percpu_cmpxchg_op(1, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)		percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)		percpu_cmpxchg_op(4, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval)	percpu_try_cmpxchg_op(1, volatile, pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval)	percpu_try_cmpxchg_op(2, volatile, pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval)	percpu_try_cmpxchg_op(4, volatile, pcp, ovalp, nval)

/*
 * Per-CPU atomic 64-bit operations are only available under 64-bit kernels.
 * 32-bit kernels must fall back to generic operations.
 */
#ifdef CONFIG_X86_64

#define raw_cpu_read_8(pcp)				__raw_cpu_read(8, , pcp)
#define raw_cpu_write_8(pcp, val)			__raw_cpu_write(8, , pcp, val)

#define this_cpu_read_8(pcp)				__raw_cpu_read(8, volatile, pcp)
#define this_cpu_write_8(pcp, val)			__raw_cpu_write(8, volatile, pcp, val)

#define this_cpu_read_stable_8(pcp)			__raw_cpu_read_stable(8, pcp)

#define raw_cpu_add_8(pcp, val)				percpu_add_op(8, , (pcp), val)
#define raw_cpu_and_8(pcp, val)				percpu_binary_op(8, , "and", (pcp), val)
#define raw_cpu_or_8(pcp, val)				percpu_binary_op(8, , "or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val)			percpu_add_return_op(8, , pcp, val)
#define raw_cpu_xchg_8(pcp, nval)			raw_percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval)		percpu_cmpxchg_op(8, , pcp, oval, nval)
#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval)		percpu_try_cmpxchg_op(8, , pcp, ovalp, nval)

#define this_cpu_add_8(pcp, val)			percpu_add_op(8, volatile, (pcp), val)
#define this_cpu_and_8(pcp, val)			percpu_binary_op(8, volatile, "and", (pcp), val)
#define this_cpu_or_8(pcp, val)				percpu_binary_op(8, volatile, "or", (pcp), val)
#define this_cpu_add_return_8(pcp, val)			percpu_add_return_op(8, volatile, pcp, val)
#define this_cpu_xchg_8(pcp, nval)			this_percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)		percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval)	percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)

#define raw_cpu_read_long(pcp)				raw_cpu_read_8(pcp)

#else /* !CONFIG_X86_64: */

/* There is no generic 64-bit read stable operation for 32-bit targets. */
#define this_cpu_read_stable_8(pcp)			({ BUILD_BUG(); (typeof(pcp))0; })

#define raw_cpu_read_long(pcp)				raw_cpu_read_4(pcp)

#endif /* CONFIG_X86_64 */

#define this_cpu_read_const(pcp)			__raw_cpu_read_const(pcp)

/*
 * this_cpu_read() makes the compiler load the per-CPU variable every time
 * it is accessed, while this_cpu_read_stable() allows the value to be
 * cached. this_cpu_read_stable() is more efficient and can be used if its
 * value is guaranteed to be valid across CPUs. The current users include
 * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
 * actually per-thread variables implemented as per-CPU variables and
 * thus stable for the duration of the respective task.
 */
#define this_cpu_read_stable(pcp)			__pcpu_size_call_return(this_cpu_read_stable_, pcp)

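/*
 * Illustrative contrast (hypothetical variable):
 *
 *	while (this_cpu_read(foo_flag))		// reloaded on every pass
 *		cpu_relax();
 *
 * whereas a this_cpu_read_stable(pcpu_hot.current_task) may be hoisted
 * out of a loop entirely, since the value cannot change underneath the
 * current task.
 */
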
#define x86_this_cpu_constant_test_bit(_nr, _var)			\
({									\
	unsigned long __percpu *addr__ =				\
		(unsigned long __percpu *)&(_var) + ((_nr) / BITS_PER_LONG); \
									\
	!!((1UL << ((_nr) % BITS_PER_LONG)) & raw_cpu_read(*addr__));	\
})

#define x86_this_cpu_variable_test_bit(_nr, _var)			\
({									\
	bool oldbit;							\
									\
	asm volatile("btl %[nr], " __percpu_arg([var])			\
		     CC_SET(c)						\
		     : CC_OUT(c) (oldbit)				\
		     : [var] "m" (__my_cpu_var(_var)),			\
		       [nr] "rI" (_nr));				\
	oldbit;								\
})

#define x86_this_cpu_test_bit(_nr, _var)				\
	(__builtin_constant_p(_nr)					\
	 ? x86_this_cpu_constant_test_bit(_nr, _var)			\
	 : x86_this_cpu_variable_test_bit(_nr, _var))

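/*
 * Usage sketch (illustrative, hypothetical per-CPU bitmap): constant
 * bit numbers take the pure C path above, variable ones the BT
 * instruction:
 *
 *	if (x86_this_cpu_test_bit(nr, foo_flags))
 *		...;
 */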

#include <asm-generic/percpu.h>

/* We can use this directly for the local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros. These are used for some per_cpu
 * variables that are initialized and accessed before the per_cpu areas
 * are allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)				\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)				\
	DECLARE_PER_CPU(_type, _name);					\
	extern __typeof__(_type) *_name##_early_ptr;			\
	extern __typeof__(_type)  _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)			\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);			\
	extern __typeof__(_type) *_name##_early_ptr;			\
	extern __typeof__(_type)  _name##_early_map[]

#define	early_per_cpu_ptr(_name)			(_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx)			(_name##_early_map[_idx])

#define	early_per_cpu(_name, _cpu)					\
	*(early_per_cpu_ptr(_name) ?					\
		&early_per_cpu_ptr(_name)[_cpu] :			\
		&per_cpu(_name, _cpu))

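/*
 * Usage sketch (illustrative names; x86_cpu_to_apicid is believed to
 * be a real user of this pattern): reads transparently hit the
 * __initdata early map until the per-CPU areas are set up, and the
 * real per-CPU copies afterwards:
 *
 *	DEFINE_EARLY_PER_CPU(int, foo_setting, -1);
 *	...
 *	int v = early_per_cpu(foo_setting, cpu);
 */
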
#else /* !CONFIG_SMP: */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)				\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)				\
	DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)			\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define	early_per_cpu(_name, _cpu)			per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name)			NULL
/* no early_per_cpu_map() */

#endif /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */