// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>

#include <trace/syscall.h>

#include <asm/set_memory.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static int ftrace_poke_late = 0;

int ftrace_arch_code_modify_prepare(void)
	__acquires(&text_mutex)
{
	/*
	 * Need to grab text_mutex to prevent a race from module loading
	 * and live kernel patching from changing the text permissions while
	 * ftrace has it set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
	__releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and we need to finish the text_poke_queue()
	 * that they do, here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
	return 0;
}

static const char *ftrace_nop_replace(void)
{
	return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with copy_from_kernel_nofault(),
	 * and make sure what we read is what we expected it to be before
	 * modifying it.
	 */
	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
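
/*
 * Illustrative sketch (not part of the original file): the 5-byte insn
 * that text_gen_insn(CALL_INSN_OPCODE, ip, addr) produces is the 0xe8
 * opcode followed by a 32-bit displacement measured from the end of the
 * instruction. A hypothetical re-derivation, for reference only:
 */
#if 0	/* example only */
static void example_encode_call(unsigned long ip, unsigned long addr,
				unsigned char insn[CALL_INSN_SIZE])
{
	/* rel32 is relative to the address of the *next* instruction */
	s32 rel = (s32)(addr - (ip + CALL_INSN_SIZE));

	insn[0] = CALL_INSN_OPCODE;		/* 0xe8 */
	memcpy(&insn[1], &rel, sizeof(rel));	/* little-endian disp32 */
}
#endif
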
/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late)
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	else
		text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}
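
/*
 * Illustrative usage sketch (not part of the original file): a patch site
 * toggles between the 5-byte NOP and a 5-byte call, so turning a site into
 * a call boils down to the following ('ip' and 'addr' are hypothetical):
 */
#if 0	/* example only */
	ret = ftrace_modify_code_direct(ip, ftrace_nop_replace(),
					ftrace_call_replace(ip, addr));
#endif
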
/*
 * Should never be called:
 *  It is only reachable from __ftrace_replace_code(), which is called by
 *  ftrace_replace_code() (overridden on x86) and by ftrace_update_code().
 *  Both of those turn mcount sites into nops or nops into function calls,
 *  but never convert a function from not using regs to one that uses
 *  regs, which is what ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_bug(ret, rec);
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_regs_caller_ret(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-function_trace_op (4 bytes)> */
#define OP_REF_SIZE	7
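
/*
 * Illustration (not part of the original file): a RIP-relative operand is
 * encoded as a 32-bit displacement from the end of the instruction, so for
 * the 7-byte movq above the loaded address works out to:
 *
 *	target = insn_ip + OP_REF_SIZE + offset
 *
 * which is why create_trampoline() below computes the replacement offset
 * as "ptr - (insn_ip + OP_REF_SIZE)". A hypothetical helper:
 */
#if 0	/* example only */
static void *example_riprel_target(unsigned long insn_ip, int offset)
{
	return (void *)(insn_ip + OP_REF_SIZE + offset);
}
#endif
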
/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};

#define RET_SIZE	(1 + IS_ENABLED(CONFIG_SLS))

static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long retq;
	unsigned long *ptr;
	void *trampoline;
	void *ip;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough memory to store the ftrace_caller code,
	 * the ret instruction, as well as the address of the ftrace_ops
	 * this trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;

	/* The trampoline ends with ret(q) */
	retq = (unsigned long)ftrace_stub;
	ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE);
	if (WARN_ON(ret < 0))
		goto fail;

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
		if (ret < 0)
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */
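
	/*
	 * Illustration (not part of the original file): with the sizing
	 * used above, the trampoline is laid out as:
	 *
	 *	trampoline + 0			copy of ftrace_(regs_)caller
	 *	trampoline + size		ret (RET_SIZE bytes)
	 *	trampoline + size + RET_SIZE	address of this ftrace_ops
	 *
	 * The last slot is what the retargeted movq below loads into %rdx.
	 */
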
	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	memcpy(trampoline + call_offset,
	       text_gen_insn(CALL_INSN_OPCODE,
			     trampoline + call_offset,
			     ftrace_ops_get_func(ops)), CALL_INSN_SIZE);
	mutex_unlock(&text_mutex);

	/* The ALLOC_TRAMP flag lets us know that we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_vm_flush_reset_perms(trampoline);

	if (likely(system_state != SYSTEM_BOOTING))
		set_memory_ro((unsigned long)trampoline, npages);
	set_memory_x((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}

void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}
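
/*
 * Illustrative sketch (not part of the original file): because a dynamic
 * trampoline is a byte-for-byte copy of ftrace_(regs_)caller, the call
 * instruction inside it sits at this same offset, so its address can be
 * recomputed later as:
 */
#if 0	/* example only */
	unsigned long call_ip = ops->trampoline +
		calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
#endif
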
void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

	return ptr + CALL_INSN_SIZE + call.disp;
}

void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
	defined(CONFIG_FUNCTION_GRAPH_TRACER)
		/*
		 * We only know about function graph tracer setting as static
		 * trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
extern void ftrace_graph_call(void);
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}
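
/*
 * Illustration (not part of the original file): the jmp generated here
 * parallels the call from ftrace_call_replace(); only the opcode differs,
 * the rel32 math is identical:
 *
 *	e8 <rel32>	call addr	rel32 = addr - (ip + 5)
 *	e9 <rel32>	jmp  addr	rel32 = addr - (ip + 5)
 */
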
static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}
#else /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
int ftrace_enable_ftrace_graph_caller(void)
{
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return 0;
}
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it onto the stack of return
 * addresses in the current thread info.
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	int bit;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably. Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	bit = ftrace_test_recursion_trylock(ip, *parent);
	if (bit < 0)
		return;

	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
		*parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &fregs->regs;
	unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

	prepare_ftrace_return(ip, (unsigned long *)stack, 0);
}
#endif

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
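
/*
 * Illustrative sketch (not part of the original file): conceptually,
 * prepare_ftrace_return() above performs this swap on the caller's
 * return-address slot once function_graph_enter() accepts the entry:
 */
#if 0	/* example only */
	unsigned long real_ret = *parent;	/* original return address */

	*parent = (unsigned long)&return_to_handler;
	/*
	 * real_ret is saved on the per-task return stack by
	 * function_graph_enter() and restored by return_to_handler
	 * when the traced function returns.
	 */
#endif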