// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>

#include <trace/syscall.h>

#include <asm/set_memory.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static int ftrace_poke_late = 0;

int ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
	/*
	 * Need to grab text_mutex to prevent a race from module loading
	 * and live kernel patching from changing the text permissions while
	 * ftrace has it set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and we need to finish the text_poke_queue()
	 * that they do, here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
	return 0;
}

static const char *ftrace_nop_replace(void)
{
	return ideal_nops[NOP_ATOMIC5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with probe_kernel_*(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */
	/* read the text we want to modify */
	if (probe_kernel_read(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
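/*
 * For reference, the two 5-byte (MCOUNT_INSN_SIZE) encodings that a patch
 * site toggles between look like this (the NOP shown is the usual ideal
 * 5-byte NOP; the exact bytes come from ideal_nops[] and are CPU dependent):
 *
 *	e8 <rel32>		call <target>	(rel32 = target - (ip + 5))
 *	0f 1f 44 00 00		nopl 0x0(%rax,%rax,1)
 *
 * ftrace_call_replace() and ftrace_nop_replace() generate these templates,
 * and ftrace_verify_code() compares a site against the expected one before
 * it is rewritten.
 */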
/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late)
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	else
		text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when a module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}
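/*
 * Note the three text-patching primitives used in this file:
 *
 *	text_poke_early() - a plain write; only safe before SMP is up (boot)
 *			    or before the code can ever execute (module load).
 *	text_poke_queue() - batches writes while text_mutex is held; the
 *			    queue is flushed by text_poke_finish() in
 *			    ftrace_arch_code_modify_post_process().
 *	text_poke_bp()	  - breakpoint-based patching, for text that may be
 *			    executing on another CPU at the same time.
 */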
/*
 * Should never be called:
 * It is only called by __ftrace_replace_code(), which is called by
 * ftrace_replace_code() (which x86 overrides) and by ftrace_update_code(),
 * which turns mcount calls into nops or nops into function calls, but
 * never converts a function from not using regs to one that uses regs,
 * which is what ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_bug(ret, rec);
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_epilogue(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE	7
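/*
 * Byte layout of the reference above: three opcode/ModRM bytes (48 8b 15)
 * followed by a 4-byte RIP-relative displacement to function_trace_op,
 * OP_REF_SIZE (7) bytes in total. This matches the op[3]/offset split of
 * ftrace_op_code_union below.
 */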
/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};

#define RET_SIZE	1
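/*
 * Layout of a trampoline built by create_trampoline() below:
 *
 *	+------------------------------+ <- trampoline
 *	| copy of ftrace_(regs_)caller |    size = end_offset - start_offset
 *	+------------------------------+ <- trampoline + size
 *	| ret, copied from ftrace_stub |    RET_SIZE bytes
 *	+------------------------------+ <- trampoline + size + RET_SIZE
 *	| pointer to this ftrace_ops   |    sizeof(void *) bytes, read via
 *	+------------------------------+    the rewritten movq above
 */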
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long retq;
	unsigned long *ptr;
	void *trampoline;
	void *ip;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_epilogue;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the ret, as well as the address of the ftrace_ops this
	 * trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = probe_kernel_read(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;

	/* The trampoline ends with ret(q) */
	retq = (unsigned long)ftrace_stub;
	ret = probe_kernel_read(ip, (void *)retq, RET_SIZE);
	if (WARN_ON(ret < 0))
		goto fail;

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	memcpy(trampoline + call_offset,
	       text_gen_insn(CALL_INSN_OPCODE,
			     trampoline + call_offset,
			     ftrace_ops_get_func(ops)), CALL_INSN_SIZE);
	mutex_unlock(&text_mutex);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_vm_flush_reset_perms(trampoline);

	set_memory_ro((unsigned long)trampoline, npages);
	set_memory_x((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}
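/*
 * Since create_trampoline() copies ftrace_caller/ftrace_regs_caller
 * verbatim, the offset of the call site inside the original caller is
 * also its offset inside every trampoline copy; the helper below
 * computes that offset from the linker symbols.
 */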
static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = probe_kernel_read(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

	return ptr + CALL_INSN_SIZE + call.disp;
}

void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		/*
		 * The function graph tracer is the only one we know of
		 * that sets a static trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;
	int faulted;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably. Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;
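	/*
	 * In C terms, the asm below performs:
	 *
	 *	old = *parent;			// read the return address
	 *	*parent = return_hooker;	// redirect to return_to_handler
	 *	faulted = 0;
	 *
	 * with exception-table fixups that set faulted = 1 if either
	 * access faults, instead of oopsing.
	 */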
	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = old;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */