xref: /linux/include/linux/compiler.h (revision a9a3ed1eff3601b63aea4fb462d8b3b92c7c1e7e)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section(_ftrace_annotated_branch)	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Use __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section(_ftrace_branch)		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})
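
/*
 * Illustrative sketch (not part of the upstream header): with the macros
 * above, an ordinary conditional such as
 *
 *	if (err)
 *		goto out;
 *
 * is rewritten by the preprocessor into roughly
 *
 *	if (__trace_if_var(!!(err)))
 *		goto out;
 *
 * so every non-constant condition bumps a miss/hit counter in the
 * _ftrace_branch section, which the branch profiler later reports.
 */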

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
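
/*
 * Illustrative usage sketch (not part of the upstream header); the
 * function and variable names below are hypothetical:
 *
 *	err = copy_data(buf);
 *	if (unlikely(err))	// error path expected to be rare
 *		return err;
 *	if (likely(ready))	// hint that this is the hot branch
 *		process(buf);
 *
 * The hints only influence code layout and static branch prediction; a
 * wrong hint costs performance, never correctness.
 */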

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
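
/*
 * Illustrative sketch (not part of the upstream header): barrier_data()
 * is what lets memzero_explicit() in lib/string.c guarantee that a
 * memset() of secrets is not optimized away, roughly:
 *
 *	memset(s, 0, count);
 *	barrier_data(s);	// compiler must assume 's' is still read
 */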

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(.rodata..c_jump_table)

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
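
/*
 * Illustrative usage sketch (not part of the upstream header); MODE_A,
 * MODE_B and the helpers are hypothetical names:
 *
 *	switch (mode) {
 *	case MODE_A: return do_a();
 *	case MODE_B: return do_b();
 *	}
 *	unreachable();	// tells GCC and objtool no fall-through exists
 */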

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in
 * the linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotating each function and data
 * symbol.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__section("___kentry" "+" #sym )			\
	= (unsigned long)&sym;
#endif
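
/*
 * Illustrative sketch (not part of the upstream header); 'my_vector' is
 * a hypothetical symbol referenced only from assembly:
 *
 *	void my_vector(void);
 *	KENTRY(my_vector);	// keeps my_vector despite no C references
 */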

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif
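
/*
 * Illustrative sketch (not part of the upstream header): RELOC_HIDE()
 * adds an offset to a pointer while hiding the arithmetic from the
 * compiler, so no bounds warning or optimization can key off it. The
 * per-CPU accessors are the classic user, roughly:
 *
 *	ptr = RELOC_HIDE(&per_cpu_var, per_cpu_offset(cpu));
 */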
1681da177e4SLinus Torvalds 
169fe8c8a12SCesar Eduardo Barros #ifndef OPTIMIZER_HIDE_VAR
1703e2ffd65SMichael S. Tsirkin /* Make the optimizer believe the variable can be manipulated arbitrarily. */
1713e2ffd65SMichael S. Tsirkin #define OPTIMIZER_HIDE_VAR(var)						\
1723e2ffd65SMichael S. Tsirkin 	__asm__ ("" : "=r" (var) : "0" (var))
173fe8c8a12SCesar Eduardo Barros #endif
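
/*
 * Illustrative sketch (not part of the upstream header): forcing the
 * compiler to forget what it knows about a value, e.g. to keep a
 * constant-time comparison from being short-circuited:
 *
 *	int diff = a ^ b;
 *	OPTIMIZER_HIDE_VAR(diff);	// 'diff' now looks unpredictable
 *	return !diff;
 */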

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
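
/*
 * Illustrative expansion (not part of the upstream header): on line 42
 * of a file, __UNIQUE_ID(foo) pastes to __UNIQUE_ID_foo42 - unique
 * enough for one-use-per-line identifiers, hence "not-quite-unique".
 */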

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. Attempting to inline it may cause a build
 * failure.
 *	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and one for the macro's copy into the '__u' union
 * allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}
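
/*
 * Illustrative note (not part of the upstream header): word-at-a-time
 * string handling, such as the dcache name comparison, deliberately
 * reads a full word even when only the first byte is known valid, which
 * is why only one byte is KASAN-checked above, roughly:
 *
 *	unsigned long w = read_word_at_a_time(name);
 */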

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
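
/*
 * Illustrative usage sketch (not part of the upstream header): a
 * lockless flag shared between two CPUs; 'data' and 'flag' are
 * hypothetical names:
 *
 *	// producer
 *	data = 42;
 *	smp_wmb();
 *	WRITE_ONCE(flag, 1);
 *
 *	// consumer
 *	while (!READ_ONCE(flag))
 *		cpu_relax();
 *	smp_rmb();
 *	use(data);
 *
 * READ_ONCE()/WRITE_ONCE() stop the compiler from tearing, fusing or
 * hoisting the accesses; the smp_*mb() barriers order them across CPUs.
 */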

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(.discard.addressable) __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
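
/*
 * Illustrative sketch (not part of the upstream header); 'my_func' is a
 * hypothetical function called only from inline assembly:
 *
 *	__ADDRESSABLE(my_func);	// emits a .discard.addressable entry so
 *				// 'my_func' survives as a real symbol
 */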

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
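
/*
 * Illustrative sketch (not part of the upstream header): given a 32-bit
 * place-relative offset of the kind emitted by position-independent
 * jump/entry tables:
 *
 *	const int rel = ...;	// stores 'target - &rel'
 *	void *target = offset_to_ptr(&rel);
 */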

#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of the POSIX assert macro, this macro will break the
 * build if the supplied condition is *false*, emitting the supplied error
 * message if the compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
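
/*
 * Illustrative usage sketch (not part of the upstream header); 'struct
 * foo' is a hypothetical type:
 *
 *	compiletime_assert(sizeof(struct foo) <= 64,
 *			   "struct foo grew past a cacheline");
 *
 * If the condition is compile-time false, the call to the
 * __compiletime_error()-marked function survives optimization and the
 * build fails with the message above.
 */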

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
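
/*
 * Illustrative sketch (not part of the upstream header): this is how
 * ARRAY_SIZE() in <linux/kernel.h> rejects plain pointers, roughly:
 *
 *	#define ARRAY_SIZE(arr) \
 *		(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 */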

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#endif /* __LINUX_COMPILER_H */