/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section("_ftrace_annotated_branch")	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section("_ftrace_branch")		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
# define likely_notrace(x)	likely(x)
# define unlikely_notrace(x)	unlikely(x)
#endif
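
/*
 * Illustrative usage sketch (the function and variable names below are
 * hypothetical): likely()/unlikely() only encode the expected truth
 * value of the condition, letting the compiler lay out the cold path
 * out of the hot instruction stream, e.g.:
 *
 *	err = setup_hardware();
 *	if (unlikely(err))	// error path expected to be cold
 *		return err;
 */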

/* Optimization barrier */
#ifndef barrier
/* The "volatile" is due to gcc bugs */
# define barrier() __asm__ __volatile__("": : :"memory")
#endif

#ifndef barrier_data
/*
 * This version is used, e.g., to prevent dead store elimination on @ptr,
 * where gcc and llvm may behave differently when otherwise using a
 * normal barrier(): while gcc is fine with a normal barrier(), llvm
 * needs an explicit input variable to be assumed clobbered. The issue
 * is as follows: while the inline asm might access any memory it wants,
 * the compiler could have fit all of @ptr into registers instead, and
 * since @ptr never escaped from there, it proved that the inline asm
 * wasn't touching any of it. This version works well with both
 * compilers, i.e. we're telling the compiler that the inline asm
 * absolutely may see the contents of @ptr.
 * See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
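
/*
 * Illustrative sketch (the wipe() helper is hypothetical; the kernel's
 * memzero_explicit() relies on this same pattern): without the
 * barrier_data() call, the compiler may elide the memset() of a dying
 * stack buffer as a dead store.
 *
 *	static void wipe(char *buf, size_t len)
 *	{
 *		memset(buf, 0, len);
 *		barrier_data(buf);	// buf must be assumed read here
 *	}
 */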

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
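
/*
 * Illustrative sketch (the trap() function is hypothetical; x86 BUG()
 * uses the same idea): after an instruction that can never fall
 * through, telling the optimizer so avoids dead code and spurious
 * "control reaches end of non-void function" warnings.
 *
 *	static void trap(void)
 *	{
 *		asm volatile("ud2");	// never returns
 *		unreachable();		// nothing is emitted past here
 *	}
 */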

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in
 * the linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((__section__("___kentry+" #sym)))	\
	= (unsigned long)&sym;
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

/**
 * data_race - mark an expression as containing intentional data races
 *
 * This data_race() macro is useful for situations in which data races
 * should be forgiven. One example is diagnostic code that accesses
 * shared variables but is not a part of the core synchronization design.
 *
 * This macro *does not* affect normal code generation, but is a hint
 * to tooling that data races here are to be ignored.
 */
#define data_race(expr)							\
({									\
	__unqual_scalar_typeof(({ expr; })) __v = ({			\
		__kcsan_disable_current();				\
		expr;							\
	});								\
	__kcsan_enable_current();					\
	__v;								\
})
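
/*
 * Illustrative sketch (the stats variable is hypothetical): a
 * diagnostic read that tolerates load tearing can be marked so that
 * KCSAN does not report the race:
 *
 *	pr_info("rx packets so far: %lu\n", data_race(stats->rx_packets));
 */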

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(".discard.addressable") __used \
		__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}

#endif /* __ASSEMBLY__ */

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#include <asm/rwonce.h>

#endif /* __LINUX_COMPILER_H */