/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section("_ftrace_annotated_branch")	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section("_ftrace_branch")		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
# define likely_notrace(x)	likely(x)
# define unlikely_notrace(x)	unlikely(x)
#endif

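/*
 * Illustrative usage (not part of the original header): likely()/unlikely()
 * annotate which way a branch usually goes, e.g. a hypothetical fast path:
 *
 *	if (unlikely(!ptr))
 *		return -EINVAL;		// rare error path
 *	if (likely(hit))
 *		return 0;		// common case, kept fall-through
 */
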
/* Optimization barrier */
#ifndef barrier
/* The "volatile" is due to gcc bugs */
# define barrier() __asm__ __volatile__("": : :"memory")
#endif

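/*
 * Illustrative usage (not part of the original header): barrier() is a
 * compiler-only fence; it forces memory to be re-read afterwards but emits
 * no CPU instruction, e.g. in a hypothetical polling loop:
 *
 *	while (!done)
 *		barrier();	// re-load 'done' on every iteration
 */
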
#ifndef barrier_data
/*
 * This version exists, e.g., to prevent dead-store elimination on @ptr
 * where gcc and llvm may behave differently when otherwise using
 * normal barrier(): while gcc behavior gets along with a normal
 * barrier(), llvm needs an explicit input variable to be assumed
 * clobbered. The issue is as follows: while the inline asm might
 * access any memory it wants, the compiler could have kept all of
 * @ptr in registers instead, and since @ptr never escaped
 * from there, it proved that the inline asm wasn't touching any of
 * it. This version works well with both compilers, i.e. we're telling
 * the compiler that the inline asm absolutely may see the contents
 * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif

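/*
 * Illustrative usage (not part of the original header): this is what lets
 * memzero_explicit()-style code keep an otherwise-dead store:
 *
 *	memset(key, 0, sizeof(key));
 *	barrier_data(key);	// @key is an asm input, so the clearing
 *				// store cannot be optimized away
 */
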
/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_OBJTOOL
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define __stringify_label(n) #n

#define __annotate_unreachable(c) ({					\
	asm volatile(__stringify_label(c) ":\n\t"			\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long " __stringify_label(c) "b - .\n\t"		\
		     ".popsection\n\t" : : "i" (c));			\
})
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")

#else /* !CONFIG_OBJTOOL */
#define annotate_unreachable()
#define __annotate_jump_table
#endif /* CONFIG_OBJTOOL */

#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif

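/*
 * Illustrative usage (not part of the original header): unreachable() marks
 * paths the programmer knows cannot be taken, e.g. after an exhaustive
 * switch over a hypothetical two-value enum:
 *
 *	switch (state) {
 *	case STATE_A: return do_a();
 *	case STATE_B: return do_b();
 *	}
 *	unreachable();	// every enum value is handled above
 */
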
/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((__section__("___kentry+" #sym)))		\
	= (unsigned long)&sym;
#endif

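/*
 * Illustrative usage (not part of the original header): keeping a
 * hypothetical vector-table handler alive despite having no C callers:
 *
 *	KENTRY(my_vector_entry);	// emits a kept reference to the symbol
 */
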
#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#define absolute_pointer(val)	RELOC_HIDE((void *)(val), 0)

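/*
 * Illustrative usage (not part of the original header): absolute_pointer()
 * turns a fixed address into a pointer the compiler cannot reason about,
 * avoiding bogus out-of-bounds warnings on literal addresses, e.g. for a
 * hypothetical legacy ROM signature check:
 *
 *	if (!memcmp(absolute_pointer(0x000f0000), sig, sizeof(sig)))
 *		handle_rom();
 */
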
#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif

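/*
 * Illustrative usage (not part of the original header), in the style of
 * constant-time code such as crypto_memneq(): after
 *
 *	OPTIMIZER_HIDE_VAR(neq);
 *
 * the compiler must treat @neq as an unknown value and cannot fold or
 * short-circuit later computations that depend on it.
 */
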
/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

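/*
 * Illustrative expansion (not part of the original header): with this
 * fallback, __UNIQUE_ID(foo) on line 42 pastes to __UNIQUE_ID_foo42.
 * Compiler-specific headers override it using __COUNTER__, which is why
 * the __LINE__ version above is only "not-quite-unique": two expansions
 * on the same line would collide.
 */
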
/**
 * data_race - mark an expression as containing intentional data races
 *
 * This data_race() macro is useful for situations in which data races
 * should be forgiven.  One example is diagnostic code that accesses
 * shared variables but is not a part of the core synchronization design.
 *
 * This macro *does not* affect normal code generation, but is a hint
 * to tooling that data races here are to be ignored.
 */
#define data_race(expr)							\
({									\
	__unqual_scalar_typeof(({ expr; })) __v = ({			\
		__kcsan_disable_current();				\
		expr;							\
	});								\
	__kcsan_enable_current();					\
	__v;								\
})

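/*
 * Illustrative usage (not part of the original header): reading a
 * hypothetical statistics counter that is updated without locking, where
 * an occasionally stale value is acceptable:
 *
 *	hits = data_race(stats->hits);	// KCSAN will not report this read
 */
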
/*
 * With CONFIG_CFI_CLANG, the compiler replaces function addresses in
 * instrumented C code with jump table addresses. Architectures that
 * support CFI can define this macro to return the actual function address
 * when needed.
 */
#ifndef function_nocfi
#define function_nocfi(x) (x)
#endif

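/*
 * Illustrative usage (not part of the original header): code handing a
 * real entry point to hardware or firmware, e.g. a hypothetical
 * secondary-CPU boot routine, would pass function_nocfi(secondary_entry)
 * rather than the CFI jump-table address the plain C expression yields.
 */
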
#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(".discard.addressable") __used \
		__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;

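/*
 * Illustrative usage (not part of the original header): inline asm that
 * names a symbol directly, as the static_call() machinery does, pairs it
 * with __ADDRESSABLE(sym) so the toolchain keeps the symbol around:
 *
 *	__ADDRESSABLE(my_func);		// discarded pointer, kept symbol
 */
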
/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}

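/*
 * Illustrative usage (not part of the original header): for a table entry
 * emitted as ".long target - ." in assembly, a hypothetical lookup does
 *
 *	void *target = offset_to_ptr(&table[i]);
 *
 * i.e. it computes &table[i] + table[i], recovering the absolute address.
 */
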
#endif /* __ASSEMBLY__ */

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))

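/*
 * Illustrative usage (not part of the original header): ARRAY_SIZE() in
 * <linux/kernel.h> is built on this check, roughly
 *
 *	#define ARRAY_SIZE(arr) \
 *		(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 *
 * so passing a plain pointer fails to compile rather than returning a
 * bogus element count.
 */
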
/*
 * Whether 'type' is a signed type or an unsigned type. Supports scalar types,
 * bool and also pointer types.
 */
#define is_signed_type(type) (((type)(-1)) < (__force type)1)

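/*
 * Illustrative behaviour (not part of the original header):
 *
 *	is_signed_type(int)		-> true  ((int)-1 < 1)
 *	is_signed_type(unsigned char)	-> false ((unsigned char)-1 == 255)
 *	is_signed_type(char *)		-> false (an all-ones pointer is huge)
 */
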
/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#include <asm/rwonce.h>

#endif /* __LINUX_COMPILER_H */