#ifndef _ASM_X86_FTRACE_H
#define _ASM_X86_FTRACE_H

#ifdef __ASSEMBLY__

	/* skip is set if the stack was already partially adjusted */
	.macro MCOUNT_SAVE_FRAME skip=0
	/*
	 * We add enough stack to hold a pt_regs-shaped frame
	 * (SS+8 == sizeof(struct pt_regs); the offsets come from
	 * asm-offsets), though only the argument registers and
	 * rax are actually saved here.
	 */
	subq $(SS+8-\skip), %rsp
	movq %rax, RAX(%rsp)
	movq %rcx, RCX(%rsp)
	movq %rdx, RDX(%rsp)
	movq %rsi, RSI(%rsp)
	movq %rdi, RDI(%rsp)
	movq %r8, R8(%rsp)
	movq %r9, R9(%rsp)
	/* Move RIP (the return address pushed by the mcount call) to its proper location */
	movq SS+8(%rsp), %rdx
	movq %rdx, RIP(%rsp)
	.endm

	.macro MCOUNT_RESTORE_FRAME skip=0
	movq R9(%rsp), %r9
	movq R8(%rsp), %r8
	movq RDI(%rsp), %rdi
	movq RSI(%rsp), %rsi
	movq RDX(%rsp), %rdx
	movq RCX(%rsp), %rcx
	movq RAX(%rsp), %rax
	addq $(SS+8-\skip), %rsp
	.endm

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CC_USING_FENTRY
# define MCOUNT_ADDR		((long)(__fentry__))
#else
# define MCOUNT_ADDR		((long)(mcount))
#endif
#define MCOUNT_INSN_SIZE	5 /* sizeof mcount call (e8 + rel32) */

#ifdef CONFIG_DYNAMIC_FTRACE
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif

#ifndef __ASSEMBLY__
extern void mcount(void);
/* Non-zero while ftrace is patching call sites; checked by the int3 handler */
extern atomic_t modifying_ftrace_code;
extern void __fentry__(void);

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/*
	 * addr is the address of the mcount call instruction.
	 * recordmcount does the necessary offset calculation.
	 */
	return addr;
}

#ifdef CONFIG_DYNAMIC_FTRACE

struct dyn_arch_ftrace {
	/* No extra data needed for x86 */
};

/* Fix up a breakpoint hit on an mcount call site that is being modified */
int ftrace_int3_handler(struct pt_regs *regs);

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */


#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
#include <asm/compat.h>

/*
 * Because ia32 syscall numbers do not map onto the x86_64 syscall
 * table, tracing an ia32 task would report bogus syscalls.
 * Instead of reporting them, just do not trace them.
 *
 * If the user really wants these, then they should use the
 * raw syscall tracepoints with filtering.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return is_compat_task();
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */

#endif /* _ASM_X86_FTRACE_H */
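
/*
 * Illustrative sketch only, not built as part of this header: how a
 * trampoline might use the save/restore macros above, loosely modeled
 * on ftrace_caller in arch/x86/kernel/entry_64.S. The my_ftrace_thunk
 * and my_trace_func labels are hypothetical stand-ins; the real
 * trampoline patches its call target at runtime.
 *
 *	.globl my_ftrace_thunk
 *	my_ftrace_thunk:
 *		MCOUNT_SAVE_FRAME
 *
 *		movq RIP(%rsp), %rdi		# return addr of the mcount call
 *		subq $MCOUNT_INSN_SIZE, %rdi	# back up to the call site itself
 *		movq 8(%rbp), %rsi		# parent ip (mcount, non-fentry case)
 *
 *		call my_trace_func		# tracer callback(ip, parent_ip)
 *
 *		MCOUNT_RESTORE_FRAME
 *		retq				# back into the traced function
 */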