/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
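/*
 * Illustrative usage (a sketch, not something defined here): a file that must
 * not be instrumented, e.g. early-boot or decompression code, opts out by
 * defining the macro before any kernel headers are included:
 *
 *	#define DISABLE_BRANCH_PROFILING
 *	#include <linux/kernel.h>
 */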
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section("_ftrace_annotated_branch")	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * We use __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif
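/*
 * Illustrative example: a call site annotated as
 *
 *	if (likely(page))
 *		return page;
 *
 * gets a static ftrace_likely_data record; ftrace_likely_update() then counts
 * how often the prediction matched reality, and the results are typically
 * reported through the branch-profiler statistics in tracefs
 * (trace_stat/branch_annotated).
 */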

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section("_ftrace_branch")		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */
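/*
 * Sketch of the effect (illustrative condition): with this option enabled, an
 * ordinary conditional such as
 *
 *	if (ret < 0)
 *		goto out;
 *
 * is expanded by the if() macro above so that taken/not-taken counts land in
 * the per-site __if_trace record, unless the condition is a compile-time
 * constant, in which case __trace_if_var() leaves it uninstrumented.
 */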

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
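/*
 * Illustrative use of barrier_data() (a sketch with a hypothetical helper):
 * making the compiler treat a just-cleared buffer as used, so the memset()
 * of sensitive data cannot be eliminated as a dead store:
 *
 *	static void wipe_secret(void *buf, size_t len)
 *	{
 *		memset(buf, 0, len);
 *		barrier_data(buf);
 *	}
 */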

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
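/*
 * Typical use (illustrative, hypothetical function name): placed after a call
 * that never returns, so the compiler does not warn about falling off the end
 * of a non-void function and can discard the dead code that follows:
 *
 *	cpu_reset_and_halt();
 *	unreachable();
 */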

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or for initcalls.
 * Those are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in
 * the linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotating each function and data
 * item individually.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__section("___kentry" "+" #sym )			\
	= (unsigned long)&sym;
#endif
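/*
 * Illustrative use (hypothetical symbol name): keep an entry point alive even
 * though no C code references it by name:
 *
 *	void early_vector_stub(void);
 *	KENTRY(early_vector_stub)
 */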

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif
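/*
 * Illustrative use (a sketch): compute a pointer at a fixed offset from
 * another object without letting the compiler assume the result still points
 * into that object, which is how the per-CPU accessors shift a base pointer
 * by a per-CPU offset:
 *
 *	ptr = RELOC_HIDE(&base_var, my_cpu_offset);
 */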

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
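/*
 * Illustrative use (a sketch): hide a value from the optimizer so it cannot
 * specialise or fold the surrounding code, e.g. in code that must run in
 * constant time:
 *
 *	unsigned long diff = a ^ b;
 *	OPTIMIZER_HIDE_VAR(diff);
 *	return diff != 0;
 */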

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
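/*
 * Example (illustrative): with this fallback, __UNIQUE_ID(foo) pastes the
 * prefix with the current line number, so uses on different lines produce
 * distinct identifiers such as __UNIQUE_ID_foo120 and __UNIQUE_ID_foo121;
 * two uses on the same line still collide, hence "not-quite-unique".
 */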

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. Attempting to inline it may cause a build failure.
 * 	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and one for the macro's copy into the '__u' union
 * allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
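/*
 * Usage sketch (illustrative, hypothetical flag): a completion flag shared
 * between process context and an interrupt handler on the same CPU:
 *
 *	writer (irq handler):	WRITE_ONCE(done, 1);
 *
 *	reader (task context):	while (!READ_ONCE(done))
 *					cpu_relax();
 *
 * The compiler performs each access exactly once and cannot hoist the load
 * out of the loop; any ordering against other memory accesses still requires
 * explicit barriers or atomics, as described in the comment above.
 */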

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(".discard.addressable") __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
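/*
 * Illustrative use (hypothetical symbol name): force a function that is only
 * referenced from inline assembly to be emitted:
 *
 *	static void asm_callee(void) { }
 *	__ADDRESSABLE(asm_callee)
 */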

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
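/*
 * Example (illustrative): if 'rel' stores the signed 32-bit offset from its
 * own location to some object, i.e. rel == (int)((unsigned long)obj -
 * (unsigned long)&rel), then offset_to_ptr(&rel) returns the absolute
 * address of obj. This is the access pattern for relative-reference tables.
 */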

#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
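/*
 * Example (illustrative, hypothetical struct name): fail the build with a
 * readable message when a size assumption is violated:
 *
 *	compiletime_assert(sizeof(struct sample) <= 64,
 *			   "struct sample no longer fits in a cache line");
 */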
3469a8ab1c3SDaniel Santos 
34747933ad4SPeter Zijlstra #define compiletime_assert_atomic_type(t)				\
34847933ad4SPeter Zijlstra 	compiletime_assert(__native_word(t),				\
34947933ad4SPeter Zijlstra 		"Need native word sized stores/loads for atomicity.")
35047933ad4SPeter Zijlstra 
351ec0bbef6SMiguel Ojeda /* &a[0] degrades to a pointer: a different type from an array */
352ec0bbef6SMiguel Ojeda #define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
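/*
 * Illustrative effect: used inside ARRAY_SIZE()-style macros, so passing a
 * plain pointer fails the build: a pointer has the same type as &ptr[0],
 * while an array does not have the same type as &arr[0].
 */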

#endif /* __LINUX_COMPILER_H */