xref: /linux/include/linux/compiler.h (revision 2209fda323e2fd2a2d0885595fd5097717f8d2aa)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * __builtin_constant_p(x) is used to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
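
/*
 * Illustrative usage sketch (not part of the original header): likely() and
 * unlikely() only annotate the expected truth value of a condition; the
 * generated code is correct either way.  A caller might write:
 *
 *	if (unlikely(!ptr))
 *		return -ENOMEM;
 *
 * A file that wants to opt out of branch tracing can define
 * DISABLE_BRANCH_PROFILING before including any headers.
 */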

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
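
/*
 * Illustrative sketch (not part of the original header): barrier() constrains
 * only the compiler, not the CPU.  A busy-wait loop might use it so that
 * 'flag' (a hypothetical variable) is re-read on every iteration:
 *
 *	while (!flag)
 *		barrier();
 *
 * In real kernel code READ_ONCE() and cpu_relax() would normally be used
 * instead.
 */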

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("ANNOTATE_REACHABLE counter=%c0"			\
		     : : "i" (__COUNTER__));				\
})
#define annotate_unreachable() ({					\
	asm volatile("ANNOTATE_UNREACHABLE counter=%c0"			\
		     : : "i" (__COUNTER__));				\
})
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
#endif
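
/*
 * Illustrative sketch (not part of the original header): unreachable() tells
 * the compiler (and, via the annotation, objtool) that control never gets
 * here, e.g. after a switch that covers every possible value.  do_even() and
 * do_odd() are hypothetical:
 *
 *	switch (val & 1) {
 *	case 0: return do_even();
 *	case 1: return do_odd();
 *	}
 *	unreachable();
 */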

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data
 * object.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((section("___kentry" "+" #sym ), used))	\
	= (unsigned long)&sym;
#endif
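
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * exception-vector entry that is referenced only from assembly could be kept
 * alive for the linker with:
 *
 *	void my_vector_entry(void);
 *	KENTRY(my_vector_entry)
 */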

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif
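
/*
 * Illustrative sketch (not part of the original header): RELOC_HIDE() adds a
 * byte offset to a pointer while hiding the arithmetic from the optimizer,
 * so the compiler cannot draw conclusions about the resulting address; the
 * per-CPU pointer helpers are one user of this pattern.  'base' and 'off'
 * below are hypothetical:
 *
 *	ptr = RELOC_HIDE(base, off);
 *
 * which evaluates to (typeof(base))((unsigned long)base + (off)), but without
 * letting the optimizer reason about the addition.
 */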

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
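
/*
 * Illustrative expansion (not part of the original header): with this
 * fallback definition, __UNIQUE_ID(foo) used on line 42 of a file pastes to
 * __UNIQUE_ID_foo42, so two expansions with the same prefix on the same line
 * still collide (hence "not-quite-unique"); compiler-specific headers may
 * override this with a __COUNTER__ based variant.
 */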

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. Attempting to inline it may cause a build failure.
 *	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and one for the macro's copy of the stack-allocated
 * variable '__u'.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
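
/*
 * Illustrative usage sketch (not part of the original header): a word-sized
 * flag shared between process context and an interrupt handler might be
 * accessed as below, so that each access is a single, untorn load or store
 * that the compiler cannot elide or refetch.  'shared_flag' is a
 * hypothetical variable:
 *
 *	WRITE_ONCE(shared_flag, 1);
 *
 *	while (!READ_ONCE(shared_flag))
 *		cpu_relax();
 */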

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __attribute__((section(".discard.addressable"), used)) \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
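
/*
 * Illustrative sketch (not part of the original header): a function whose
 * address is taken only inside inline assembly could be forced to keep a
 * compiler-visible reference with ('my_trampoline' is hypothetical):
 *
 *	void my_trampoline(void);
 *	__ADDRESSABLE(my_trampoline)
 */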

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
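
/*
 * Illustrative sketch (not part of the original header): offset_to_ptr() is
 * the read side of a "relative pointer".  If a table slot at address 'slot'
 * (hypothetical) stores the 32-bit value (int)((char *)target - (char *)slot),
 * then offset_to_ptr(slot) computes slot + (target - slot) and so recovers
 * 'target'.
 */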

#else /* __ASSEMBLY__ */

#ifdef __KERNEL__
#ifndef LINKER_SCRIPT

#ifdef CONFIG_STACK_VALIDATION
.macro ANNOTATE_UNREACHABLE counter:req
\counter:
	.pushsection .discard.unreachable
	.long \counter\()b -.
	.popsection
.endm

.macro ANNOTATE_REACHABLE counter:req
\counter:
	.pushsection .discard.reachable
	.long \counter\()b -.
	.popsection
.endm

.macro ASM_UNREACHABLE
999:
	.pushsection .discard.unreachable
	.long 999b - .
	.popsection
.endm
#else /* CONFIG_STACK_VALIDATION */
.macro ANNOTATE_UNREACHABLE counter:req
.endm

.macro ANNOTATE_REACHABLE counter:req
.endm

.macro ASM_UNREACHABLE
.endm
#endif /* CONFIG_STACK_VALIDATION */

#endif /* LINKER_SCRIPT */
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */

#ifndef __optimize
# define __optimize(level)
#endif

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of the POSIX assert, this macro will break the build if
 * the supplied condition is *false*, emitting the supplied error message if
 * the compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
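
/*
 * Illustrative usage sketch (not part of the original header): the condition
 * must be a compile-time constant; when it is false, the macro expands into a
 * call to a never-defined __compiletime_assert_<line>() function marked with
 * __compiletime_error(), and the build breaks:
 *
 *	compiletime_assert(sizeof(long) >= 4,
 *			   "long must be at least 32 bits");
 *
 * Helpers such as BUILD_BUG_ON_MSG() are built on top of this macro.
 */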

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

#endif /* __LINUX_COMPILER_H */