1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Dynamic function tracing support. 4 * 5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> 6 * 7 * Thanks goes to Ingo Molnar, for suggesting the idea. 8 * Mathieu Desnoyers, for suggesting postponing the modifications. 9 * Arjan van de Ven, for keeping me straight, and explaining to me 10 * the dangers of modifying code on the run. 11 */ 12 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 15 #include <linux/spinlock.h> 16 #include <linux/hardirq.h> 17 #include <linux/uaccess.h> 18 #include <linux/ftrace.h> 19 #include <linux/percpu.h> 20 #include <linux/sched.h> 21 #include <linux/slab.h> 22 #include <linux/init.h> 23 #include <linux/list.h> 24 #include <linux/module.h> 25 #include <linux/memory.h> 26 #include <linux/vmalloc.h> 27 28 #include <trace/syscall.h> 29 30 #include <asm/set_memory.h> 31 #include <asm/kprobes.h> 32 #include <asm/ftrace.h> 33 #include <asm/nops.h> 34 #include <asm/text-patching.h> 35 36 #ifdef CONFIG_DYNAMIC_FTRACE 37 38 static int ftrace_poke_late = 0; 39 40 void ftrace_arch_code_modify_prepare(void) 41 __acquires(&text_mutex) 42 { 43 /* 44 * Need to grab text_mutex to prevent a race from module loading 45 * and live kernel patching from changing the text permissions while 46 * ftrace has it set to "read/write". 47 */ 48 mutex_lock(&text_mutex); 49 ftrace_poke_late = 1; 50 } 51 52 void ftrace_arch_code_modify_post_process(void) 53 __releases(&text_mutex) 54 { 55 /* 56 * ftrace_make_{call,nop}() may be called during 57 * module load, and we need to finish the text_poke_queue() 58 * that they do, here. 59 */ 60 text_poke_finish(); 61 ftrace_poke_late = 0; 62 mutex_unlock(&text_mutex); 63 } 64 65 static const char *ftrace_nop_replace(void) 66 { 67 return x86_nops[5]; 68 } 69 70 static const char *ftrace_call_replace(unsigned long ip, unsigned long addr) 71 { 72 return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr); 73 } 74 75 static int ftrace_verify_code(unsigned long ip, const char *old_code) 76 { 77 char cur_code[MCOUNT_INSN_SIZE]; 78 79 /* 80 * Note: 81 * We are paranoid about modifying text, as if a bug was to happen, it 82 * could cause us to read or write to someplace that could cause harm. 83 * Carefully read and modify the code with probe_kernel_*(), and make 84 * sure what we read is what we expected it to be before modifying it. 85 */ 86 /* read the text we want to modify */ 87 if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) { 88 WARN_ON(1); 89 return -EFAULT; 90 } 91 92 /* Make sure it is what we expect it to be */ 93 if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) { 94 ftrace_expected = old_code; 95 WARN_ON(1); 96 return -EINVAL; 97 } 98 99 return 0; 100 } 101 102 /* 103 * Marked __ref because it calls text_poke_early() which is .init.text. That is 104 * ok because that call will happen early, during boot, when .init sections are 105 * still present. 106 */ 107 static int __ref 108 ftrace_modify_code_direct(unsigned long ip, const char *old_code, 109 const char *new_code) 110 { 111 int ret = ftrace_verify_code(ip, old_code); 112 if (ret) 113 return ret; 114 115 /* replace the text with the new text */ 116 if (ftrace_poke_late) 117 text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL); 118 else 119 text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE); 120 return 0; 121 } 122 123 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) 124 { 125 unsigned long ip = rec->ip; 126 const char *new, *old; 127 128 old = ftrace_call_replace(ip, addr); 129 new = ftrace_nop_replace(); 130 131 /* 132 * On boot up, and when modules are loaded, the MCOUNT_ADDR 133 * is converted to a nop, and will never become MCOUNT_ADDR 134 * again. This code is either running before SMP (on boot up) 135 * or before the code will ever be executed (module load). 136 * We do not want to use the breakpoint version in this case, 137 * just modify the code directly. 138 */ 139 if (addr == MCOUNT_ADDR) 140 return ftrace_modify_code_direct(ip, old, new); 141 142 /* 143 * x86 overrides ftrace_replace_code -- this function will never be used 144 * in this case. 145 */ 146 WARN_ONCE(1, "invalid use of ftrace_make_nop"); 147 return -EINVAL; 148 } 149 150 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 151 { 152 unsigned long ip = rec->ip; 153 const char *new, *old; 154 155 old = ftrace_nop_replace(); 156 new = ftrace_call_replace(ip, addr); 157 158 /* Should only be called when module is loaded */ 159 return ftrace_modify_code_direct(rec->ip, old, new); 160 } 161 162 /* 163 * Should never be called: 164 * As it is only called by __ftrace_replace_code() which is called by 165 * ftrace_replace_code() that x86 overrides, and by ftrace_update_code() 166 * which is called to turn mcount into nops or nops into function calls 167 * but not to convert a function from not using regs to one that uses 168 * regs, which ftrace_modify_call() is for. 169 */ 170 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, 171 unsigned long addr) 172 { 173 WARN_ON(1); 174 return -EINVAL; 175 } 176 177 int ftrace_update_ftrace_func(ftrace_func_t func) 178 { 179 unsigned long ip; 180 const char *new; 181 182 ip = (unsigned long)(&ftrace_call); 183 new = ftrace_call_replace(ip, (unsigned long)func); 184 text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL); 185 186 ip = (unsigned long)(&ftrace_regs_call); 187 new = ftrace_call_replace(ip, (unsigned long)func); 188 text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL); 189 190 return 0; 191 } 192 193 void ftrace_replace_code(int enable) 194 { 195 struct ftrace_rec_iter *iter; 196 struct dyn_ftrace *rec; 197 const char *new, *old; 198 int ret; 199 200 for_ftrace_rec_iter(iter) { 201 rec = ftrace_rec_iter_record(iter); 202 203 switch (ftrace_test_record(rec, enable)) { 204 case FTRACE_UPDATE_IGNORE: 205 default: 206 continue; 207 208 case FTRACE_UPDATE_MAKE_CALL: 209 old = ftrace_nop_replace(); 210 break; 211 212 case FTRACE_UPDATE_MODIFY_CALL: 213 case FTRACE_UPDATE_MAKE_NOP: 214 old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec)); 215 break; 216 } 217 218 ret = ftrace_verify_code(rec->ip, old); 219 if (ret) { 220 ftrace_bug(ret, rec); 221 return; 222 } 223 } 224 225 for_ftrace_rec_iter(iter) { 226 rec = ftrace_rec_iter_record(iter); 227 228 switch (ftrace_test_record(rec, enable)) { 229 case FTRACE_UPDATE_IGNORE: 230 default: 231 continue; 232 233 case FTRACE_UPDATE_MAKE_CALL: 234 case FTRACE_UPDATE_MODIFY_CALL: 235 new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec)); 236 break; 237 238 case FTRACE_UPDATE_MAKE_NOP: 239 new = ftrace_nop_replace(); 240 break; 241 } 242 243 text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL); 244 ftrace_update_record(rec, enable); 245 } 246 text_poke_finish(); 247 } 248 249 void arch_ftrace_update_code(int command) 250 { 251 ftrace_modify_all_code(command); 252 } 253 254 /* Currently only x86_64 supports dynamic trampolines */ 255 #ifdef CONFIG_X86_64 256 257 #ifdef CONFIG_MODULES 258 #include <linux/moduleloader.h> 259 /* Module allocation simplifies allocating memory for code */ 260 static inline void *alloc_tramp(unsigned long size) 261 { 262 return module_alloc(size); 263 } 264 static inline void tramp_free(void *tramp) 265 { 266 module_memfree(tramp); 267 } 268 #else 269 /* Trampolines can only be created if modules are supported */ 270 static inline void *alloc_tramp(unsigned long size) 271 { 272 return NULL; 273 } 274 static inline void tramp_free(void *tramp) { } 275 #endif 276 277 /* Defined as markers to the end of the ftrace default trampolines */ 278 extern void ftrace_regs_caller_end(void); 279 extern void ftrace_regs_caller_ret(void); 280 extern void ftrace_caller_end(void); 281 extern void ftrace_caller_op_ptr(void); 282 extern void ftrace_regs_caller_op_ptr(void); 283 extern void ftrace_regs_caller_jmp(void); 284 285 /* movq function_trace_op(%rip), %rdx */ 286 /* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */ 287 #define OP_REF_SIZE 7 288 289 /* 290 * The ftrace_ops is passed to the function callback. Since the 291 * trampoline only services a single ftrace_ops, we can pass in 292 * that ops directly. 293 * 294 * The ftrace_op_code_union is used to create a pointer to the 295 * ftrace_ops that will be passed to the callback function. 296 */ 297 union ftrace_op_code_union { 298 char code[OP_REF_SIZE]; 299 struct { 300 char op[3]; 301 int offset; 302 } __attribute__((packed)); 303 }; 304 305 #define RET_SIZE 1 + IS_ENABLED(CONFIG_SLS) 306 307 static unsigned long 308 create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) 309 { 310 unsigned long start_offset; 311 unsigned long end_offset; 312 unsigned long op_offset; 313 unsigned long call_offset; 314 unsigned long jmp_offset; 315 unsigned long offset; 316 unsigned long npages; 317 unsigned long size; 318 unsigned long *ptr; 319 void *trampoline; 320 void *ip; 321 /* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */ 322 unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 }; 323 unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE }; 324 union ftrace_op_code_union op_ptr; 325 int ret; 326 327 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { 328 start_offset = (unsigned long)ftrace_regs_caller; 329 end_offset = (unsigned long)ftrace_regs_caller_end; 330 op_offset = (unsigned long)ftrace_regs_caller_op_ptr; 331 call_offset = (unsigned long)ftrace_regs_call; 332 jmp_offset = (unsigned long)ftrace_regs_caller_jmp; 333 } else { 334 start_offset = (unsigned long)ftrace_caller; 335 end_offset = (unsigned long)ftrace_caller_end; 336 op_offset = (unsigned long)ftrace_caller_op_ptr; 337 call_offset = (unsigned long)ftrace_call; 338 jmp_offset = 0; 339 } 340 341 size = end_offset - start_offset; 342 343 /* 344 * Allocate enough size to store the ftrace_caller code, 345 * the iret , as well as the address of the ftrace_ops this 346 * trampoline is used for. 347 */ 348 trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *)); 349 if (!trampoline) 350 return 0; 351 352 *tramp_size = size + RET_SIZE + sizeof(void *); 353 npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE); 354 355 /* Copy ftrace_caller onto the trampoline memory */ 356 ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size); 357 if (WARN_ON(ret < 0)) 358 goto fail; 359 360 ip = trampoline + size; 361 memcpy(ip, retq, RET_SIZE); 362 363 /* No need to test direct calls on created trampolines */ 364 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { 365 /* NOP the jnz 1f; but make sure it's a 2 byte jnz */ 366 ip = trampoline + (jmp_offset - start_offset); 367 if (WARN_ON(*(char *)ip != 0x75)) 368 goto fail; 369 ret = copy_from_kernel_nofault(ip, x86_nops[2], 2); 370 if (ret < 0) 371 goto fail; 372 } 373 374 /* 375 * The address of the ftrace_ops that is used for this trampoline 376 * is stored at the end of the trampoline. This will be used to 377 * load the third parameter for the callback. Basically, that 378 * location at the end of the trampoline takes the place of 379 * the global function_trace_op variable. 380 */ 381 382 ptr = (unsigned long *)(trampoline + size + RET_SIZE); 383 *ptr = (unsigned long)ops; 384 385 op_offset -= start_offset; 386 memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE); 387 388 /* Are we pointing to the reference? */ 389 if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) 390 goto fail; 391 392 /* Load the contents of ptr into the callback parameter */ 393 offset = (unsigned long)ptr; 394 offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE; 395 396 op_ptr.offset = offset; 397 398 /* put in the new offset to the ftrace_ops */ 399 memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE); 400 401 /* put in the call to the function */ 402 mutex_lock(&text_mutex); 403 call_offset -= start_offset; 404 memcpy(trampoline + call_offset, 405 text_gen_insn(CALL_INSN_OPCODE, 406 trampoline + call_offset, 407 ftrace_ops_get_func(ops)), CALL_INSN_SIZE); 408 mutex_unlock(&text_mutex); 409 410 /* ALLOC_TRAMP flags lets us know we created it */ 411 ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP; 412 413 set_vm_flush_reset_perms(trampoline); 414 415 if (likely(system_state != SYSTEM_BOOTING)) 416 set_memory_ro((unsigned long)trampoline, npages); 417 set_memory_x((unsigned long)trampoline, npages); 418 return (unsigned long)trampoline; 419 fail: 420 tramp_free(trampoline); 421 return 0; 422 } 423 424 void set_ftrace_ops_ro(void) 425 { 426 struct ftrace_ops *ops; 427 unsigned long start_offset; 428 unsigned long end_offset; 429 unsigned long npages; 430 unsigned long size; 431 432 do_for_each_ftrace_op(ops, ftrace_ops_list) { 433 if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) 434 continue; 435 436 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { 437 start_offset = (unsigned long)ftrace_regs_caller; 438 end_offset = (unsigned long)ftrace_regs_caller_end; 439 } else { 440 start_offset = (unsigned long)ftrace_caller; 441 end_offset = (unsigned long)ftrace_caller_end; 442 } 443 size = end_offset - start_offset; 444 size = size + RET_SIZE + sizeof(void *); 445 npages = DIV_ROUND_UP(size, PAGE_SIZE); 446 set_memory_ro((unsigned long)ops->trampoline, npages); 447 } while_for_each_ftrace_op(ops); 448 } 449 450 static unsigned long calc_trampoline_call_offset(bool save_regs) 451 { 452 unsigned long start_offset; 453 unsigned long call_offset; 454 455 if (save_regs) { 456 start_offset = (unsigned long)ftrace_regs_caller; 457 call_offset = (unsigned long)ftrace_regs_call; 458 } else { 459 start_offset = (unsigned long)ftrace_caller; 460 call_offset = (unsigned long)ftrace_call; 461 } 462 463 return call_offset - start_offset; 464 } 465 466 void arch_ftrace_update_trampoline(struct ftrace_ops *ops) 467 { 468 ftrace_func_t func; 469 unsigned long offset; 470 unsigned long ip; 471 unsigned int size; 472 const char *new; 473 474 if (!ops->trampoline) { 475 ops->trampoline = create_trampoline(ops, &size); 476 if (!ops->trampoline) 477 return; 478 ops->trampoline_size = size; 479 return; 480 } 481 482 /* 483 * The ftrace_ops caller may set up its own trampoline. 484 * In such a case, this code must not modify it. 485 */ 486 if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) 487 return; 488 489 offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS); 490 ip = ops->trampoline + offset; 491 func = ftrace_ops_get_func(ops); 492 493 mutex_lock(&text_mutex); 494 /* Do a safe modify in case the trampoline is executing */ 495 new = ftrace_call_replace(ip, (unsigned long)func); 496 text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL); 497 mutex_unlock(&text_mutex); 498 } 499 500 /* Return the address of the function the trampoline calls */ 501 static void *addr_from_call(void *ptr) 502 { 503 union text_poke_insn call; 504 int ret; 505 506 ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE); 507 if (WARN_ON_ONCE(ret < 0)) 508 return NULL; 509 510 /* Make sure this is a call */ 511 if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) { 512 pr_warn("Expected E8, got %x\n", call.opcode); 513 return NULL; 514 } 515 516 return ptr + CALL_INSN_SIZE + call.disp; 517 } 518 519 void prepare_ftrace_return(unsigned long ip, unsigned long *parent, 520 unsigned long frame_pointer); 521 522 /* 523 * If the ops->trampoline was not allocated, then it probably 524 * has a static trampoline func, or is the ftrace caller itself. 525 */ 526 static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) 527 { 528 unsigned long offset; 529 bool save_regs = rec->flags & FTRACE_FL_REGS_EN; 530 void *ptr; 531 532 if (ops && ops->trampoline) { 533 #if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \ 534 defined(CONFIG_FUNCTION_GRAPH_TRACER) 535 /* 536 * We only know about function graph tracer setting as static 537 * trampoline. 538 */ 539 if (ops->trampoline == FTRACE_GRAPH_ADDR) 540 return (void *)prepare_ftrace_return; 541 #endif 542 return NULL; 543 } 544 545 offset = calc_trampoline_call_offset(save_regs); 546 547 if (save_regs) 548 ptr = (void *)FTRACE_REGS_ADDR + offset; 549 else 550 ptr = (void *)FTRACE_ADDR + offset; 551 552 return addr_from_call(ptr); 553 } 554 555 void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) 556 { 557 unsigned long offset; 558 559 /* If we didn't allocate this trampoline, consider it static */ 560 if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) 561 return static_tramp_func(ops, rec); 562 563 offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS); 564 return addr_from_call((void *)ops->trampoline + offset); 565 } 566 567 void arch_ftrace_trampoline_free(struct ftrace_ops *ops) 568 { 569 if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) 570 return; 571 572 tramp_free((void *)ops->trampoline); 573 ops->trampoline = 0; 574 } 575 576 #endif /* CONFIG_X86_64 */ 577 #endif /* CONFIG_DYNAMIC_FTRACE */ 578 579 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 580 581 #if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) 582 extern void ftrace_graph_call(void); 583 static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) 584 { 585 return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr); 586 } 587 588 static int ftrace_mod_jmp(unsigned long ip, void *func) 589 { 590 const char *new; 591 592 new = ftrace_jmp_replace(ip, (unsigned long)func); 593 text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL); 594 return 0; 595 } 596 597 int ftrace_enable_ftrace_graph_caller(void) 598 { 599 unsigned long ip = (unsigned long)(&ftrace_graph_call); 600 601 return ftrace_mod_jmp(ip, &ftrace_graph_caller); 602 } 603 604 int ftrace_disable_ftrace_graph_caller(void) 605 { 606 unsigned long ip = (unsigned long)(&ftrace_graph_call); 607 608 return ftrace_mod_jmp(ip, &ftrace_stub); 609 } 610 #endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */ 611 612 /* 613 * Hook the return address and push it in the stack of return addrs 614 * in current thread info. 615 */ 616 void prepare_ftrace_return(unsigned long ip, unsigned long *parent, 617 unsigned long frame_pointer) 618 { 619 unsigned long return_hooker = (unsigned long)&return_to_handler; 620 int bit; 621 622 /* 623 * When resuming from suspend-to-ram, this function can be indirectly 624 * called from early CPU startup code while the CPU is in real mode, 625 * which would fail miserably. Make sure the stack pointer is a 626 * virtual address. 627 * 628 * This check isn't as accurate as virt_addr_valid(), but it should be 629 * good enough for this purpose, and it's fast. 630 */ 631 if (unlikely((long)__builtin_frame_address(0) >= 0)) 632 return; 633 634 if (unlikely(ftrace_graph_is_dead())) 635 return; 636 637 if (unlikely(atomic_read(¤t->tracing_graph_pause))) 638 return; 639 640 bit = ftrace_test_recursion_trylock(ip, *parent); 641 if (bit < 0) 642 return; 643 644 if (!function_graph_enter(*parent, ip, frame_pointer, parent)) 645 *parent = return_hooker; 646 647 ftrace_test_recursion_unlock(bit); 648 } 649 650 #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS 651 void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, 652 struct ftrace_ops *op, struct ftrace_regs *fregs) 653 { 654 struct pt_regs *regs = &fregs->regs; 655 unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs); 656 657 prepare_ftrace_return(ip, (unsigned long *)stack, 0); 658 } 659 #endif 660 661 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 662