/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section("_ftrace_annotated_branch")	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif
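
/*
 * Usage sketch (illustrative only; 'err' is a hypothetical local):
 *
 *	err = do_something();
 *	if (unlikely(err))
 *		return err;
 *
 * The annotation only hints at the expected branch direction; it does
 * not change what the condition evaluates to.
 */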

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section("_ftrace_branch")		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
# define likely_notrace(x)	likely(x)
# define unlikely_notrace(x)	unlikely(x)
#endif

/* Optimization barrier */
#ifndef barrier
/* The "volatile" is due to gcc bugs */
# define barrier() __asm__ __volatile__("": : :"memory")
#endif

#ifndef barrier_data
/*
 * This version exists, e.g., to prevent dead-store elimination on @ptr,
 * where gcc and llvm may behave differently when otherwise using a
 * normal barrier(): while gcc gets along with a normal barrier(), llvm
 * needs an explicit input variable to be assumed clobbered.  The issue
 * is as follows: while the inline asm might access any memory it wants,
 * the compiler could have fit all of @ptr into registers instead, and
 * since @ptr never escaped from there, it proved that the inline asm
 * wasn't touching any of it.  This version works well with both
 * compilers, i.e. we're telling the compiler that the inline asm
 * absolutely may see the contents of @ptr.
 * See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
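
/*
 * A minimal usage sketch: this is the pattern used by helpers such as
 * memzero_explicit() to keep a clearing memset() from being optimized
 * away ('key' is a hypothetical local buffer):
 *
 *	memset(key, 0, sizeof(key));
 *	barrier_data(key);
 */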

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_OBJTOOL
/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".data.rel.ro.c_jump_table")
#else /* !CONFIG_OBJTOOL */
#define __annotate_jump_table
#endif /* CONFIG_OBJTOOL */

/*
 * Mark a position in code as unreachable.  This can be used to
 * suppress control flow warnings after asm blocks that transfer
 * control elsewhere.
 */
#define unreachable() do {		\
	barrier_before_unreachable();	\
	__builtin_unreachable();	\
} while (0)

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly.  For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls.  Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script.  For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((__section__("___kentry+" #sym)))		\
	= (unsigned long)&sym;
#endif
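
/*
 * Usage sketch (hypothetical symbol, illustrative only): keep an entry
 * point alive even though nothing in C references it by name:
 *
 *	extern void my_vector_entry(void);
 *	KENTRY(my_vector_entry);
 */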

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
     (typeof(ptr)) (__ptr + (off)); })
#endif

#define absolute_pointer(val)	RELOC_HIDE((void *)(val), 0)

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)					\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif

#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

/**
 * data_race - mark an expression as containing intentional data races
 *
 * This data_race() macro is useful for situations in which data races
 * should be forgiven.  One example is diagnostic code that accesses
 * shared variables but is not a part of the core synchronization design.
 * For example, if accesses to a given variable are protected by a lock,
 * except for diagnostic code, then the accesses under the lock should
 * be plain C-language accesses and those in the diagnostic code should
 * use data_race().  This way, KCSAN will complain if buggy lockless
 * accesses to that variable are introduced, even if the buggy accesses
 * are protected by READ_ONCE() or WRITE_ONCE().
 *
 * This macro *does not* affect normal code generation, but is a hint
 * to tooling that data races here are to be ignored.  If the access must
 * be atomic *and* KCSAN should ignore the access, use both data_race()
 * and READ_ONCE(), for example, data_race(READ_ONCE(x)).
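 *
 * Usage sketch (illustrative; 'shared_count' is a hypothetical variable
 * that is lock-protected everywhere except this diagnostic read):
 *
 *	pr_debug("count=%lu\n", data_race(shared_count));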
 */
#define data_race(expr)						\
({								\
	__kcsan_disable_current();				\
	__auto_type __v = (expr);				\
	__kcsan_enable_current();				\
	__v;							\
})

#endif /* __KERNEL__ */

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_64BIT
#define ARCH_SEL(a,b) a
#else
#define ARCH_SEL(a,b) b
#endif

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler.  Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define ___ADDRESSABLE(sym, __attrs)					\
	static void * __used __attrs					\
	__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)(uintptr_t)&sym;

#define __ADDRESSABLE(sym) \
	___ADDRESSABLE(sym, __section(".discard.addressable"))

#define __ADDRESSABLE_ASM(sym)						\
	.pushsection .discard.addressable,"aw";				\
	.align ARCH_SEL(8,4);						\
	ARCH_SEL(.quad, .long) __stringify(sym);			\
	.popsection;

#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym))

#ifdef __CHECKER__
#define __BUILD_BUG_ON_ZERO_MSG(e, msg) (0)
#else /* __CHECKER__ */
#define __BUILD_BUG_ON_ZERO_MSG(e, msg) ((int)sizeof(struct {_Static_assert(!(e), msg);}))
#endif /* __CHECKER__ */

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	__BUILD_BUG_ON_ZERO_MSG(__same_type((a), &(a)[0]), "must be array")

/* Require C strings (i.e. NUL-terminated strings) to lack the "nonstring" attribute. */
#define __must_be_cstr(p) \
	__BUILD_BUG_ON_ZERO_MSG(__annotated(p, nonstring), "must be cstr (NUL-terminated)")
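
/*
 * For illustration, this is how an array-only helper rejects pointers at
 * compile time; ARRAY_SIZE() itself is defined elsewhere, the sketch just
 * shows the intended use of __must_be_array():
 *
 *	#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 */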

/*
 * This returns a constant expression while determining if an argument is
 * a constant expression, most importantly without evaluating the argument.
 * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
 *
 * Details:
 * - sizeof() returns an integer constant expression, and does not evaluate
 *   the value of its operand; it only examines the type of its operand.
 * - The result of comparing two integer constant expressions is also
 *   an integer constant expression.
 * - The first literal "8" isn't important.  It could be any literal value.
 * - The second literal "8" is to avoid warnings about unaligned pointers;
 *   this could otherwise just be "1".
 * - (long)(x) is used to avoid warnings about 64-bit types on 32-bit
 *   architectures.
 * - The C Standard defines "null pointer constant", "(void *)0", as
 *   distinct from other void pointers.
 * - If (x) is an integer constant expression, then the "* 0l" resolves
 *   it into an integer constant expression of value 0.  Since it is cast to
 *   "void *", this makes the second operand a null pointer constant.
 * - If (x) is not an integer constant expression, then the second operand
 *   resolves to a void pointer (but not a null pointer constant: the value
 *   is not an integer constant 0).
 * - The conditional operator's third operand, "(int *)8", is an object
 *   pointer (to type "int").
 * - The behavior (including the return type) of the conditional operator
 *   ("operand1 ? operand2 : operand3") depends on the kind of expressions
 *   given for the second and third operands.  This is the central mechanism
 *   of the macro:
 *   - When one operand is a null pointer constant (i.e. when x is an integer
 *     constant expression) and the other is an object pointer (i.e. our
 *     third operand), the conditional operator returns the type of the
 *     object pointer operand (i.e. "int *").  Here, within the sizeof(), we
 *     would then get:
 *       sizeof(*((int *)(...))) == sizeof(int) == 4
 *   - When one operand is a void pointer (i.e. when x is not an integer
 *     constant expression) and the other is an object pointer (i.e. our
 *     third operand), the conditional operator returns a "void *" type.
 *     Here, within the sizeof(), we would then get:
 *       sizeof(*((void *)(...))) == sizeof(void) == 1
 * - The equality comparison to "sizeof(int)" therefore depends on (x):
 *     sizeof(int) == sizeof(int)	(x) was a constant expression
 *     sizeof(int) != sizeof(void)	(x) was not a constant expression
 */
#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
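
/*
 * Illustrative results ('n' is a hypothetical non-constant variable):
 *
 *	__is_constexpr(4)	evaluates to 1
 *	__is_constexpr(n)	evaluates to 0, without evaluating (n)
 *	__is_constexpr(n * 0)	evaluates to 0: not an integer constant
 *				expression, even though its value is known
 */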

/*
 * Whether 'type' is a signed type or an unsigned type.  Supports scalar types,
 * bool and also pointer types.
 */
#define is_signed_type(type) (((type)(-1)) < (__force type)1)
#define is_unsigned_type(type) (!is_signed_type(type))

/*
 * Useful shorthand for "is this condition known at compile-time?"
 *
 * Note that the condition may involve non-constant values,
 * but the compiler may know enough about the details of the
 * values to determine that the condition is statically true.
 */
#define statically_true(x) (__builtin_constant_p(x) && (x))

/*
 * Similar to statically_true() but produces a constant expression
 *
 * To be used in conjunction with macros, such as BUILD_BUG_ON_ZERO(),
 * which require their input to be a constant expression and for which
 * statically_true() would otherwise fail.
 *
 * This is a trade-off: const_true() requires all its operands to be
 * compile-time constants.  Otherwise, it always returns false, even on
 * the most trivial cases like:
 *
 *	true || non_const_var
 *
 * By contrast, statically_true() is able to fold more complex
 * tautologies and will return true on expressions such as:
 *
 *	!(non_const_var * 8 % 4)
 *
 * For the general case, statically_true() is better.
 */
#define const_true(x) __builtin_choose_expr(__is_constexpr(x), x, false)

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#include <asm/rwonce.h>

#endif /* __LINUX_COMPILER_H */