/*
 * arch/arm64/kernel/entry-ftrace.S
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

/*
 * Gcc with -pg will put the following code in the beginning of each function:
 *	mov	x0, x30
 *	bl	_mcount
 *	[function's body ...]
 * "bl _mcount" may be replaced with "bl ftrace_caller" or NOP if dynamic
 * ftrace is enabled.
 *
 * Please note that x0 as an argument will not be used here because we can
 * get lr(x30) of the instrumented function at any time by winding up the
 * call stack, as long as the kernel is compiled without -fomit-frame-pointer
 * (i.e. with CONFIG_FRAME_POINTER, which is forced on arm64).
 *
 * stack layout after mcount_enter in _mcount():
 *
 * current sp/fp =>  0:+-----+
 * in _mcount()        | x29 | -> instrumented function's fp
 *                     +-----+
 *                     | x30 | -> _mcount()'s lr (= instrumented function's pc)
 * old sp       => +16:+-----+
 * when instrumented   |     |
 * function calls      | ... |
 * _mcount()           |     |
 *                     |     |
 * instrumented => +xx:+-----+
 * function's fp       | x29 | -> parent's fp
 *                     +-----+
 *                     | x30 | -> instrumented function's lr (= parent's pc)
 *                     +-----+
 *                     | ... |
 */

	.macro mcount_enter
	stp	x29, x30, [sp, #-16]!
	mov	x29, sp
	.endm

	.macro mcount_exit
	ldp	x29, x30, [sp], #16
	ret
	.endm

	.macro mcount_adjust_addr rd, rn
	sub	\rd, \rn, #AARCH64_INSN_SIZE
	.endm

	/* for instrumented function's parent */
	.macro mcount_get_parent_fp reg
	ldr	\reg, [x29]
	ldr	\reg, [\reg]
	.endm

	/* for instrumented function */
	.macro mcount_get_pc0 reg
	mcount_adjust_addr	\reg, x30
	.endm

	.macro mcount_get_pc reg
	ldr	\reg, [x29, #8]
	mcount_adjust_addr	\reg, \reg
	.endm

	.macro mcount_get_lr reg
	ldr	\reg, [x29]
	ldr	\reg, [\reg, #8]
	mcount_adjust_addr	\reg, \reg
	.endm

	.macro mcount_get_lr_addr reg
	ldr	\reg, [x29]
	add	\reg, \reg, #8
	.endm

#ifndef CONFIG_DYNAMIC_FTRACE
/*
 * void _mcount(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function makes calls, if enabled, to:
 *	- tracer function to probe instrumented function's entry,
 *	- ftrace_graph_caller to set up an exit hook
 */
ENTRY(_mcount)
	mcount_enter

	ldr	x0, =ftrace_trace_function
	ldr	x2, [x0]
	adr	x0, ftrace_stub
	cmp	x0, x2			// if (ftrace_trace_function
	b.eq	skip_ftrace_call	//     != ftrace_stub) {

	mcount_get_pc	x0		//     function's pc
	mcount_get_lr	x1		//     function's lr (= parent's pc)
	blr	x2			//     (*ftrace_trace_function)(pc, lr);

#ifndef CONFIG_FUNCTION_GRAPH_TRACER
skip_ftrace_call:			//     return;
	mcount_exit			// }
#else
	mcount_exit			//     return;
					// }
skip_ftrace_call:
	ldr	x1, =ftrace_graph_return
	ldr	x2, [x1]		// if ((ftrace_graph_return
	cmp	x0, x2			//      != ftrace_stub)
	b.ne	ftrace_graph_caller

	ldr	x1, =ftrace_graph_entry	//     || (ftrace_graph_entry
	ldr	x2, [x1]		//         != ftrace_graph_entry_stub))
	ldr	x0, =ftrace_graph_entry_stub
	cmp	x0, x2
	b.ne	ftrace_graph_caller	//     ftrace_graph_caller();

	mcount_exit
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
ENDPROC(_mcount)

#else /* CONFIG_DYNAMIC_FTRACE */
/*
 * _mcount() is needed to build the kernel with the -pg option, but all the
 * branch instructions to _mcount() are replaced with NOPs early at kernel
 * start-up. Later, on a per-function basis, each NOP is patched to a branch
 * to ftrace_caller() when tracing is enabled, and back to a NOP when it is
 * disabled.
 */
ENTRY(_mcount)
	ret
ENDPROC(_mcount)

/*
 * void ftrace_caller(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function is the counterpart of _mcount() in 'static' ftrace, and
 * makes calls to:
 *	- tracer function to probe instrumented function's entry,
 *	- ftrace_graph_caller to set up an exit hook
 */
ENTRY(ftrace_caller)
	mcount_enter

	mcount_get_pc0	x0		// function's pc
	mcount_get_lr	x1		// function's lr

	.global ftrace_call
ftrace_call:				// tracer(pc, lr);
	nop				// This will be replaced with "bl xxx"
					// where xxx can be any kind of tracer.

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.global ftrace_graph_call
ftrace_graph_call:			// ftrace_graph_caller();
	nop				// If enabled, this will be replaced
					// with "b ftrace_graph_caller".
#endif

	mcount_exit
ENDPROC(ftrace_caller)
#endif /* CONFIG_DYNAMIC_FTRACE */

ENTRY(ftrace_stub)
	ret
ENDPROC(ftrace_stub)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * void ftrace_graph_caller(void)
 *
 * Called from _mcount() or ftrace_caller() when the function_graph tracer is
 * selected.
 * This function, together with prepare_ftrace_return(), fakes the link
 * register's value on the call stack in order to intercept the instrumented
 * function's return path and run return_to_handler() later, when that
 * function exits.
 */
ENTRY(ftrace_graph_caller)
	mcount_get_lr_addr	x0	// pointer to function's saved lr
	mcount_get_pc		x1	// function's pc
	mcount_get_parent_fp	x2	// parent's fp
	bl	prepare_ftrace_return	// prepare_ftrace_return(&lr, pc, fp)

	mcount_exit
ENDPROC(ftrace_graph_caller)

/*
 * void return_to_handler(void)
 *
 * Run ftrace_return_to_handler() before going back to the parent.
 * @fp is checked against the value passed by ftrace_graph_caller()
 * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
 */
ENTRY(return_to_handler)
	str	x0, [sp, #-16]!
	mov	x0, x29			// parent's fp
	bl	ftrace_return_to_handler // addr = ftrace_return_to_handler(fp);
	mov	x30, x0			// restore the original return address
	ldr	x0, [sp], #16
	ret
END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
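
/*
 * Reader's note (not part of the original file): a sketch of the C-side
 * prototypes this code is written against, as they appear in
 * arch/arm64/kernel/ftrace.c and kernel/trace/trace_functions_graph.c for
 * the kernel generation this file targets; check those sources for the
 * authoritative declarations:
 *
 *	void prepare_ftrace_return(unsigned long *parent,
 *				   unsigned long self_addr,
 *				   unsigned long frame_pointer);
 *
 *	unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
 *
 * ftrace_graph_caller passes the address of the saved lr slot, the
 * instrumented function's pc and the parent's fp in x0-x2; return_to_handler
 * passes only fp and gets the original return address back in x0, which it
 * then moves into x30 before returning.
 */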