/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>

#include <trace/syscall.h>

#include <asm/cacheflush.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	set_kernel_text_ro();
	return 0;
}

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8 = 0xe8;
	calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}
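
/*
 * For illustration (values are examples only): with MCOUNT_INSN_SIZE == 5,
 * a call site at ip == 0xffffffff81000000 targeting a handler at
 * 0xffffffff81000100 gets the bytes e8 fb 00 00 00. The rel32
 * displacement (0x100 - 5 == 0xfb, little-endian) is relative to the
 * first byte *after* the 5-byte call instruction, which is why
 * ftrace_call_replace() computes the offset against
 * ip + MCOUNT_INSN_SIZE.
 */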

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static int
do_ftrace_mod_code(unsigned long ip, const void *new_code)
{
	/*
	 * On x86_64, kernel text mappings are mapped read-only with
	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
	 * of the kernel text mapping to modify the kernel text.
	 *
	 * For 32bit kernels, these mappings are the same and we can use
	 * the kernel identity mapping to modify code.
	 */
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa(ip));

	return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
}

static const unsigned char *ftrace_nop_replace(void)
{
	return ideal_nops[NOP_ATOMIC5];
}

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can disappear and change;
	 * we need to protect against faulting as well as code changing.
	 * We do this by using the probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	sync_core();

	return 0;
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

int modifying_ftrace_code __read_mostly;

/*
 * A breakpoint was added to the code address we are about to
 * modify, and this is the handler that will just skip over it.
 * We are either changing a nop into a trace call, or a trace
 * call to a nop. While the change is taking place, we treat
 * it just like it was a nop.
 */
int ftrace_int3_handler(struct pt_regs *regs)
{
	if (WARN_ON_ONCE(!regs))
		return 0;

	if (!ftrace_location(regs->ip - 1))
		return 0;

	regs->ip += MCOUNT_INSN_SIZE - 1;

	return 1;
}
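
/*
 * A note on the arithmetic above: when the int3 traps, regs->ip points
 * at the byte *after* the one-byte breakpoint, so regs->ip - 1 is the
 * patched address that ftrace_location() must recognize. Advancing by
 * MCOUNT_INSN_SIZE - 1 then resumes execution right after the 5-byte
 * slot, exactly as if the nop had run.
 */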

static int ftrace_write(unsigned long ip, const char *val, int size)
{
	/*
	 * On x86_64, kernel text mappings are mapped read-only with
	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
	 * of the kernel text mapping to modify the kernel text.
	 *
	 * For 32bit kernels, these mappings are the same and we can use
	 * the kernel identity mapping to modify code.
	 */
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa(ip));

	return probe_kernel_write((void *)ip, val, size);
}

static int add_break(unsigned long ip, const char *old)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;

	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	if (ftrace_write(ip, &brk, 1))
		return -EPERM;

	return 0;
}

static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);

	return add_break(rec->ip, old);
}

static int add_brk_on_nop(struct dyn_ftrace *rec)
{
	unsigned const char *old;

	old = ftrace_nop_replace();

	return add_break(rec->ip, old);
}

static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_test_record(rec, enable);

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_brk_on_nop(rec);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_brk_on_call(rec, ftrace_addr);
	}
	return 0;
}
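
/*
 * State after add_break(), for illustration: only the first byte of the
 * 5-byte site is rewritten, e.g. e8 fb 00 00 00 becomes cc fb 00 00 00.
 * Any CPU executing through the site now traps into
 * ftrace_int3_handler() and skips the slot, so the remaining four bytes
 * can be rewritten later without any CPU ever decoding a half-modified
 * instruction.
 */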

/*
 * On error, we need to remove breakpoints. This needs to
 * be done carefully. If the address does not currently have a
 * breakpoint, we know we are done. Otherwise, we look at the
 * remaining 4 bytes of the instruction. If it matches a nop
 * we replace the breakpoint with the nop. Otherwise we replace
 * it with the call instruction.
 */
static int remove_breakpoint(struct dyn_ftrace *rec)
{
	unsigned char ins[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;
	const unsigned char *nop;
	unsigned long ftrace_addr;
	unsigned long ip = rec->ip;

	/* If we fail the read, just give up */
	if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* If this does not have a breakpoint, we are done */
	if (ins[0] != brk)
		return -1;

	nop = ftrace_nop_replace();

	/*
	 * If the last 4 bytes of the instruction do not match
	 * a nop, then we assume that this is a call to ftrace_addr.
	 */
	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
		/*
		 * For extra paranoia, we check if the breakpoint is on
		 * a call that would actually jump to the ftrace_addr.
		 * If not, don't touch the breakpoint; we may just create
		 * a disaster.
		 */
		ftrace_addr = (unsigned long)FTRACE_ADDR;
		nop = ftrace_call_replace(ip, ftrace_addr);

		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
			return -EINVAL;
	}

	return probe_kernel_write((void *)ip, &nop[0], 1);
}

static int add_update_code(unsigned long ip, unsigned const char *new)
{
	/* skip breakpoint */
	ip++;
	new++;
	if (ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1))
		return -EPERM;
	return 0;
}

static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);
	return add_update_code(ip, new);
}

static int add_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();
	return add_update_code(ip, new);
}

static int add_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_test_record(rec, enable);

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_update_nop(rec);
	}

	return 0;
}

static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);

	if (ftrace_write(ip, new, 1))
		return -EPERM;

	return 0;
}

static int finish_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();

	if (ftrace_write(ip, new, 1))
		return -EPERM;
	return 0;
}

static int finish_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_update_record(rec, enable);

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return finish_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return finish_update_nop(rec);
	}

	return 0;
}

static void do_sync_core(void *data)
{
	sync_core();
}

static void run_sync(void)
{
	int enable_irqs = irqs_disabled();

	/* We may be called with interrupts disabled (on bootup). */
	if (enable_irqs)
		local_irq_enable();
	on_each_cpu(do_sync_core, NULL, 1);
	if (enable_irqs)
		local_irq_disable();
}
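
/*
 * Overview of the conversion below: three passes over all records, with
 * run_sync() (sync_core() on every CPU) between the passes:
 *
 *   1) add_breakpoints():  replace the first byte of each site with
 *      int3 (0xcc), so no CPU can execute a half-written instruction;
 *   2) add_update():       rewrite the remaining four bytes to those of
 *      the new instruction, behind the breakpoint;
 *   3) finish_update():    write the first byte of the new instruction,
 *      removing the breakpoint.
 *
 * If any step fails, every breakpoint is backed out with
 * remove_breakpoint() above.
 */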
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *report = "adding breakpoints";
	int count = 0;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_breakpoints(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	report = "updating code";

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
	}

	run_sync();

	report = "removing breakpoints";

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = finish_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
	}

	run_sync();

	return;

 remove_breakpoints:
	ftrace_bug(ret, rec ? rec->ip : 0);
	printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		remove_breakpoint(rec);
	}
}

void arch_ftrace_update_code(int command)
{
	modifying_ftrace_code++;

	ftrace_modify_all_code(command);

	modifying_ftrace_code--;
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod_jmp(unsigned long ip,
			  int old_offset, int new_offset)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
		return -EINVAL;

	*(int *)(&code[1]) = new_offset;

	if (do_ftrace_mod_code(ip, &code))
		return -EPERM;

	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}
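
/*
 * The two helpers above flip a single jmp rel32 (opcode e9) at
 * ftrace_graph_call between ftrace_stub and ftrace_graph_caller.
 * Like the call patching earlier in this file, both offsets are
 * computed relative to ip + MCOUNT_INSN_SIZE, the address of the
 * instruction following the 5-byte jmp.
 */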

#endif /* !CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it onto the stack of return
 * addresses in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
				&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
				     frame_pointer) == -EBUSY) {
		*parent = old;
		return;
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */