// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "ftrace_internal.h"
#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_INVALID_FUNCTION		"__ftrace_invalid_address__"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#endif

enum {
	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
};

struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int __maybe_unused last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL || tr->function_no_pids != NULL;
}
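
/*
 * When ftrace_pids_enabled() returns true for an ops, the registration
 * path below redirects ops->func to ftrace_pid_func() and keeps the
 * original callback in ops->saved_func, giving roughly this call flow:
 *
 *	call site -> ftrace_pid_func() -> pid filter check
 *	                               -> ops->saved_func() on a match
 */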

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

DEFINE_MUTEX(ftrace_lock);

struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;

/* Defined by vmlinux.lds.h; see the comment above arch_ftrace_ops_list_func for details */
void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
/*
 * Stub used to invoke the list ops without requiring a separate trampoline.
 */
const struct ftrace_ops ftrace_list_ops = {
	.func	= ftrace_ops_list_func,
	.flags	= FTRACE_OPS_FL_STUB,
};

static void ftrace_ops_nop_func(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op,
				struct ftrace_regs *fregs)
{
	/* do nothing */
}

/*
 * Stub used when a call site is disabled. May be called transiently by threads
 * which have made it into ftrace_caller but haven't yet recovered the ops at
 * the point the call site is disabled.
 */
const struct ftrace_ops ftrace_nop_ops = {
	.func	= ftrace_ops_nop_func,
	.flags	= FTRACE_OPS_FL_STUB,
};
#endif

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		if (pid == FTRACE_PID_IGNORE)
			return;
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return;
	}

	op->saved_func(ip, parent_ip, op, fregs);
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic or RCU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
	    FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}
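
/*
 * A DYNAMIC or RCU ops may be unregistered and freed while a CPU is still
 * inside a traced call site, so such an ops must be reached through
 * ftrace_ops_list_func(), which walks the RCU-protected ftrace_ops_list,
 * rather than from a trampoline that points at the ops directly.
 */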

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	synchronize_rcu_tasks_rude();
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it can only be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
		return -EBUSY;

	if (!is_kernel_core_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}
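
/*
 * Illustrative sketch: a minimal user of the registration path above,
 * via the public register_ftrace_function() wrapper. my_callback and
 * my_ops are hypothetical names; the callback signature is the
 * ftrace_func_t used throughout this file.
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// called for each traced function not filtered out
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */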

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* without function graph, compare against the hit count */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}
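
/*
 * function_stat_show() below derives the variance of the per-call times
 * from just three running sums (n, sum of x_i, sum of (x_i)^2), using the
 * computational form
 *
 *	s^2 = (n * \Sum (x_i)^2 - (\Sum x_i)^2) / (n * (n - 1))
 *
 * so no per-sample history has to be stored. Worked example with samples
 * {1, 2, 3}: n = 3, sum = 6, sum of squares = 14, so
 * s^2 = (3 * 14 - 36) / (3 * 2) = 1.
 */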

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = div64_ul(rec->time, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 *   s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide by only 1000 for the ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide by 1000 again.
		 */
		stddev = div64_ul(stddev,
				  rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}
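
/*
 * Rough sizing example, assuming 4K pages and CONFIG_FUNCTION_GRAPH_TRACER
 * (each record then holds node, ip, counter, time and time_squared: about
 * 48 bytes on 64-bit, with a 16 byte page header): PROFILES_PER_PAGE is
 * (4096 - 16) / 48 = 85, so the 20000 function guess above costs
 * DIV_ROUND_UP(20000, 85) = 236 pages per CPU.
 */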

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated; this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}
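
/*
 * ftrace_profile_enabled is tested again under local_irq_save() above
 * because the profiler can be disabled concurrently by
 * ftrace_profile_write(); the re-check narrows the window in which a
 * late callback records into a profiler that is being torn down.
 */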

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static bool fgraph_graph_time = true;

void ftrace_graph_graph_time_control(bool enable)
{
	fgraph_graph_time = enable;
}

static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	struct ftrace_ret_stack *ret_stack;

	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	ret_stack = ftrace_graph_get_ret_stack(current, 0);
	if (ret_stack)
		ret_stack->subtime = 0;

	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_ret_stack *ret_stack;
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {

		/* Append this call time to the parent time to subtract */
		ret_stack = ftrace_graph_get_ret_stack(current, 1);
		if (ret_stack)
			ret_stack->subtime += calltime;

		ret_stack = ftrace_graph_get_ret_stack(current, 0);
		if (ret_stack && ret_stack->subtime < calltime)
			calltime -= ret_stack->subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static struct fgraph_ops fprofiler_ops = {
	.entryfunc = &profile_graph_entry,
	.retfunc = &profile_graph_return,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&fprofiler_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph(&fprofiler_ops);
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
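
/*
 * Usage sketch: the profiler toggled by ftrace_profile_write() below is
 * driven from tracefs, typically (path may vary by mount point):
 *
 *	echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	cat /sys/kernel/tracing/trace_stat/function0
 *
 * where function0 is one of the per-CPU stat files created by
 * ftrace_profile_tracefs() further down.
 */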

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_rcu.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	trace_create_file("function_profile_enabled",
			  TRACE_MODE_WRITE, d_tracer, NULL,
			  &ftrace_profile_fops);
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
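
/*
 * EMPTY_HASH is a sentinel, not an allocation. Code below compares
 * against it rather than freeing it (see free_ftrace_hash() and
 * free_ftrace_hash_rcu()), and __ftrace_hash_move() returns it for an
 * empty source, so an unused filter costs no memory.
 */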

struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			order;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}
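
/*
 * Usage sketch: hot paths test for the empty hash themselves before
 * calling the __ variant:
 *
 *	if (ftrace_hash_empty(hash) || !__ftrace_lookup_ip(hash, ip))
 *		return;		// ip not present in this hash
 *
 * ftrace_lookup_ip() below bundles exactly that empty check with the
 * lookup for callers that are not performance critical.
 */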

/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns the entry that holds the @ip if found. NULL otherwise.
 */
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	if (ftrace_hash_empty(hash))
		return NULL;

	return __ftrace_lookup_ip(hash, ip);
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	key = ftrace_hash_key(hash, entry->ip);
	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
	list_del(&ftrace_mod->list);
	kfree(ftrace_mod->module);
	kfree(ftrace_mod->func);
	kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
	struct ftrace_mod_load *p, *n;

	/* stack tracer isn't supported yet */
	if (!head)
		return;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(p, n, head, list)
		free_ftrace_mod(p);
	mutex_unlock(&ftrace_lock);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}
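
/*
 * The two free variants above match the two removal variants.
 * free_ftrace_hash() is for hashes no reader can still see (entries were
 * unlinked with hlist_del()), while free_ftrace_hash_rcu() defers the
 * free past a grace period for hashes that RCU readers may still be
 * walking (entries unlinked with hlist_del_rcu()).
 */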

/**
 * ftrace_free_filter - remove all filters for an ftrace_ops
 * @ops: the ops to remove the filters from
 */
void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}
EXPORT_SYMBOL_GPL(ftrace_free_filter);

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}


static int ftrace_add_mod(struct trace_array *tr,
			  const char *func, const char *module,
			  int enable)
{
	struct ftrace_mod_load *ftrace_mod;
	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
	if (!ftrace_mod)
		return -ENOMEM;

	INIT_LIST_HEAD(&ftrace_mod->list);
	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
	ftrace_mod->enable = enable;

	if (!ftrace_mod->func || !ftrace_mod->module)
		goto out_free;

	list_add(&ftrace_mod->list, mod_head);

	return 0;

 out_free:
	free_ftrace_mod(ftrace_mod);

	return -ENOMEM;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	if (hash)
		new_hash->flags = hash->flags;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_head *hhd;
	struct hlist_node *tn;
	int bits = 0;
	int i;

	/*
	 * Use around half the size (max bit of it), but
	 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
	 */
	bits = fls(size / 2);

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}
	return new_hash;
}
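
/*
 * Worked example of the sizing above: for a hash holding 100 entries,
 * bits = fls(100 / 2) = fls(50) = 6, giving a table of 1 << 6 = 64
 * buckets, i.e. roughly size/2 rounded to a power of two, capped at
 * 1 << FTRACE_HASH_MAX_BITS (4096) buckets.
 */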

static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	int size = src->count;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	return dup_hash(src, size);
}

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
}

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu().
 */
int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}
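
/*
 * Example of the matching rules implemented above:
 *
 *	filter_hash	notrace_hash	result
 *	empty		empty		match everything
 *	{foo}		empty		match only foo
 *	empty		{bar}		match everything except bar
 *	{foo}		{foo}		no match (notrace wins)
 */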

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}


static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec = NULL;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			break;
	}
	return rec;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;

	rec = lookup_rec(start, end);
	if (rec)
		return rec->ip;

	return 0;
}

/**
 * ftrace_location - return the ftrace location
 * @ip: the instruction pointer to check
 *
 * If @ip matches the ftrace location, return @ip.
 * If @ip matches sym+0, return sym's ftrace location.
 * Otherwise, return 0.
 */
unsigned long ftrace_location(unsigned long ip)
{
	struct dyn_ftrace *rec;
	unsigned long offset;
	unsigned long size;

	rec = lookup_rec(ip, ip);
	if (!rec) {
		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
			goto out;

		/* map sym+0 to __fentry__ */
		if (!offset)
			rec = lookup_rec(ip, ip + size - 1);
	}

	if (rec)
		return rec->ip;

out:
	return 0;
}
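
/*
 * Usage sketch: callers such as kprobes use ftrace_location() to decide
 * whether an address can be probed via ftrace. Assuming some traceable
 * function my_func:
 *
 *	unsigned long addr = kallsyms_lookup_name("my_func");
 *
 *	if (ftrace_location(addr))
 *		// addr resolves to the mcount/fentry site inside my_func
 *
 * The sym+0 mapping means the caller need not know where the compiler
 * placed the __fentry__ call within the function.
 */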

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contain a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

static bool skip_record(struct dyn_ftrace *rec)
{
	/*
	 * At boot up, weak functions are set to disable. Function tracing
	 * can be enabled before they are, and they still need to be disabled now.
	 * If the record is disabled, still continue if it is marked as already
	 * enabled (this is needed to keep the accounting working).
	 */
	return rec->flags & FTRACE_FL_DISABLED &&
		!(rec->flags & FTRACE_FL_ENABLED);
}

static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = true;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (skip_record(rec))
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags |= FTRACE_FL_DIRECT;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * Only the internal direct_ops should have the
			 * DIRECT flag set. Thus, if it is removing a
			 * function, then that function should no longer
			 * be direct.
			 */
			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags &= ~FTRACE_FL_DIRECT;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * The TRAMP needs to be set only if rec count
			 * is decremented to one, and the ops that is
			 * left has a trampoline. As TRAMP can only be
			 * enabled if there is only a single ops attached
			 * to it.
			 */
			if (ftrace_rec_count(rec) == 1 &&
			    ftrace_find_tramp_ops_any_other(rec, ops))
				rec->flags |= FTRACE_FL_TRAMP;
			else
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}

		/*
		 * If the rec has a single associated ops, and ops->func can be
		 * called directly, allow the call site to call via the ops.
		 */
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) &&
		    ftrace_rec_count(rec) == 1 &&
		    ftrace_ops_get_func(ops) == ops->func)
			rec->flags |= FTRACE_FL_CALL_OPS;
		else
			rec->flags &= ~FTRACE_FL_CALL_OPS;

		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}
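
/*
 * Note on the bare rec->flags++ / rec->flags-- above: rec->flags does
 * double duty. The low bits are a reference count of how many registered
 * ftrace_ops trace this record (read via ftrace_rec_count(), bounded by
 * FTRACE_REF_MAX), while the FTRACE_FL_* state bits live above the
 * counter, so incrementing the word only bumps the count.
 */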

static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}
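
/*
 * Background for the IPMODIFY handling below: FTRACE_OPS_FL_IPMODIFY
 * marks an ops that rewrites the ip of the traced function (live
 * patching is the classic user), while FTRACE_OPS_FL_DIRECT attaches a
 * custom trampoline to the call site (used, for example, by BPF
 * trampolines). Both change where a function effectively goes, which is
 * why at most one IPMODIFY user is allowed per record and IPMODIFY/DIRECT
 * sharing is mediated through ops_func(SHARE_IPMODIFY_SELF).
 */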

/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no update is needed, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the following meanings:
 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 * - If the hash is EMPTY_HASH, it hits nothing
 * - Anything else hits the recs which match the hash entries.
 *
 * DIRECT ops do not have the IPMODIFY flag, but we still need to check them
 * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call
 * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with
 * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate
 * the return value to the caller and eventually to the owner of the DIRECT
 * ops.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;
	bool is_ipmodify, is_direct;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY;
	is_direct = ops->flags & FTRACE_OPS_FL_DIRECT;

	/* neither IPMODIFY nor DIRECT, skip */
	if (!is_ipmodify && !is_direct)
		return 0;

	if (WARN_ON_ONCE(is_ipmodify && is_direct))
		return 0;

	/*
	 * Since the IPMODIFY and DIRECT are very address sensitive
	 * actions, we do not allow ftrace_ops to set all functions to new
	 * hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			if (rec->flags & FTRACE_FL_IPMODIFY) {
				int ret;

				/* Cannot have two ipmodify on same rec */
				if (is_ipmodify)
					goto rollback;

				FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);

				/*
				 * Another ops with IPMODIFY is already
				 * attached. We are now attaching a direct
				 * ops. Run SHARE_IPMODIFY_SELF, to check
				 * whether sharing is supported.
				 */
				if (!ops->ops_func)
					return -EBUSY;
				ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF);
				if (ret)
					return ret;
			} else if (is_ipmodify) {
				rec->flags |= FTRACE_FL_IPMODIFY;
			}
		} else if (is_ipmodify) {
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		}
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}

static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}
ftrace function\n"); 2102 break; 2103 case FTRACE_BUG_UPDATE: 2104 pr_info("Updating ftrace call site to call a different ftrace function\n"); 2105 break; 2106 } 2107 } 2108 2109 /** 2110 * ftrace_bug - report and shutdown function tracer 2111 * @failed: The failed type (EFAULT, EINVAL, EPERM) 2112 * @rec: The record that failed 2113 * 2114 * The arch code that enables or disables the function tracing 2115 * can call ftrace_bug() when it has detected a problem in 2116 * modifying the code. @failed should be one of either: 2117 * EFAULT - if the problem happens on reading the @ip address 2118 * EINVAL - if what is read at @ip is not what was expected 2119 * EPERM - if the problem happens on writing to the @ip address 2120 */ 2121 void ftrace_bug(int failed, struct dyn_ftrace *rec) 2122 { 2123 unsigned long ip = rec ? rec->ip : 0; 2124 2125 pr_info("------------[ ftrace bug ]------------\n"); 2126 2127 switch (failed) { 2128 case -EFAULT: 2129 pr_info("ftrace faulted on modifying "); 2130 print_ip_sym(KERN_INFO, ip); 2131 break; 2132 case -EINVAL: 2133 pr_info("ftrace failed to modify "); 2134 print_ip_sym(KERN_INFO, ip); 2135 print_ip_ins(" actual: ", (unsigned char *)ip); 2136 pr_cont("\n"); 2137 if (ftrace_expected) { 2138 print_ip_ins(" expected: ", ftrace_expected); 2139 pr_cont("\n"); 2140 } 2141 break; 2142 case -EPERM: 2143 pr_info("ftrace faulted on writing "); 2144 print_ip_sym(KERN_INFO, ip); 2145 break; 2146 default: 2147 pr_info("ftrace faulted on unknown error "); 2148 print_ip_sym(KERN_INFO, ip); 2149 } 2150 print_bug_type(); 2151 if (rec) { 2152 struct ftrace_ops *ops = NULL; 2153 2154 pr_info("ftrace record flags: %lx\n", rec->flags); 2155 pr_cont(" (%ld)%s%s", ftrace_rec_count(rec), 2156 rec->flags & FTRACE_FL_REGS ? " R" : " ", 2157 rec->flags & FTRACE_FL_CALL_OPS ? " O" : " "); 2158 if (rec->flags & FTRACE_FL_TRAMP_EN) { 2159 ops = ftrace_find_tramp_ops_any(rec); 2160 if (ops) { 2161 do { 2162 pr_cont("\ttramp: %pS (%pS)", 2163 (void *)ops->trampoline, 2164 (void *)ops->func); 2165 ops = ftrace_find_tramp_ops_next(rec, ops); 2166 } while (ops); 2167 } else 2168 pr_cont("\ttramp: ERROR!"); 2169 2170 } 2171 ip = ftrace_get_addr_curr(rec); 2172 pr_cont("\n expected tramp: %lx\n", ip); 2173 } 2174 2175 FTRACE_WARN_ON_ONCE(1); 2176 } 2177 2178 static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update) 2179 { 2180 unsigned long flag = 0UL; 2181 2182 ftrace_bug_type = FTRACE_BUG_UNKNOWN; 2183 2184 if (skip_record(rec)) 2185 return FTRACE_UPDATE_IGNORE; 2186 2187 /* 2188 * If we are updating calls: 2189 * 2190 * If the record has a ref count, then we need to enable it 2191 * because someone is using it. 2192 * 2193 * Otherwise we make sure its disabled. 2194 * 2195 * If we are disabling calls, then disable all records that 2196 * are enabled. 2197 */ 2198 if (enable && ftrace_rec_count(rec)) 2199 flag = FTRACE_FL_ENABLED; 2200 2201 /* 2202 * If enabling and the REGS flag does not match the REGS_EN, or 2203 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore 2204 * this record. Set flags to fail the compare against ENABLED. 2205 * Same for direct calls. 2206 */ 2207 if (flag) { 2208 if (!(rec->flags & FTRACE_FL_REGS) != 2209 !(rec->flags & FTRACE_FL_REGS_EN)) 2210 flag |= FTRACE_FL_REGS; 2211 2212 if (!(rec->flags & FTRACE_FL_TRAMP) != 2213 !(rec->flags & FTRACE_FL_TRAMP_EN)) 2214 flag |= FTRACE_FL_TRAMP; 2215 2216 /* 2217 * Direct calls are special, as count matters. 
2218 * We must test the record for direct, if the 2219 * DIRECT and DIRECT_EN do not match, but only 2220 * if the count is 1. That's because, if the 2221 * count is something other than one, we do not 2222 * want the direct enabled (it will be done via the 2223 * direct helper). But if DIRECT_EN is set, and 2224 * the count is not one, we need to clear it. 2225 * 2226 */ 2227 if (ftrace_rec_count(rec) == 1) { 2228 if (!(rec->flags & FTRACE_FL_DIRECT) != 2229 !(rec->flags & FTRACE_FL_DIRECT_EN)) 2230 flag |= FTRACE_FL_DIRECT; 2231 } else if (rec->flags & FTRACE_FL_DIRECT_EN) { 2232 flag |= FTRACE_FL_DIRECT; 2233 } 2234 2235 /* 2236 * Ops calls are special, as count matters. 2237 * As with direct calls, they must only be enabled when count 2238 * is one, otherwise they'll be handled via the list ops. 2239 */ 2240 if (ftrace_rec_count(rec) == 1) { 2241 if (!(rec->flags & FTRACE_FL_CALL_OPS) != 2242 !(rec->flags & FTRACE_FL_CALL_OPS_EN)) 2243 flag |= FTRACE_FL_CALL_OPS; 2244 } else if (rec->flags & FTRACE_FL_CALL_OPS_EN) { 2245 flag |= FTRACE_FL_CALL_OPS; 2246 } 2247 } 2248 2249 /* If the state of this record hasn't changed, then do nothing */ 2250 if ((rec->flags & FTRACE_FL_ENABLED) == flag) 2251 return FTRACE_UPDATE_IGNORE; 2252 2253 if (flag) { 2254 /* Save off if rec is being enabled (for return value) */ 2255 flag ^= rec->flags & FTRACE_FL_ENABLED; 2256 2257 if (update) { 2258 rec->flags |= FTRACE_FL_ENABLED; 2259 if (flag & FTRACE_FL_REGS) { 2260 if (rec->flags & FTRACE_FL_REGS) 2261 rec->flags |= FTRACE_FL_REGS_EN; 2262 else 2263 rec->flags &= ~FTRACE_FL_REGS_EN; 2264 } 2265 if (flag & FTRACE_FL_TRAMP) { 2266 if (rec->flags & FTRACE_FL_TRAMP) 2267 rec->flags |= FTRACE_FL_TRAMP_EN; 2268 else 2269 rec->flags &= ~FTRACE_FL_TRAMP_EN; 2270 } 2271 2272 if (flag & FTRACE_FL_DIRECT) { 2273 /* 2274 * If there's only one user (direct_ops helper) 2275 * then we can call the direct function 2276 * directly (no ftrace trampoline). 2277 */ 2278 if (ftrace_rec_count(rec) == 1) { 2279 if (rec->flags & FTRACE_FL_DIRECT) 2280 rec->flags |= FTRACE_FL_DIRECT_EN; 2281 else 2282 rec->flags &= ~FTRACE_FL_DIRECT_EN; 2283 } else { 2284 /* 2285 * Can only call directly if there's 2286 * only one callback to the function. 2287 */ 2288 rec->flags &= ~FTRACE_FL_DIRECT_EN; 2289 } 2290 } 2291 2292 if (flag & FTRACE_FL_CALL_OPS) { 2293 if (ftrace_rec_count(rec) == 1) { 2294 if (rec->flags & FTRACE_FL_CALL_OPS) 2295 rec->flags |= FTRACE_FL_CALL_OPS_EN; 2296 else 2297 rec->flags &= ~FTRACE_FL_CALL_OPS_EN; 2298 } else { 2299 /* 2300 * Can only call directly if there's 2301 * only one set of associated ops. 2302 */ 2303 rec->flags &= ~FTRACE_FL_CALL_OPS_EN; 2304 } 2305 } 2306 } 2307 2308 /* 2309 * If this record is being updated from a nop, then 2310 * return UPDATE_MAKE_CALL. 2311 * Otherwise, 2312 * return UPDATE_MODIFY_CALL to tell the caller to convert 2313 * from the save regs, to a non-save regs function or 2314 * vice versa, or from a trampoline call. 2315 */ 2316 if (flag & FTRACE_FL_ENABLED) { 2317 ftrace_bug_type = FTRACE_BUG_CALL; 2318 return FTRACE_UPDATE_MAKE_CALL; 2319 } 2320 2321 ftrace_bug_type = FTRACE_BUG_UPDATE; 2322 return FTRACE_UPDATE_MODIFY_CALL; 2323 } 2324 2325 if (update) { 2326 /* If there's no more users, clear all flags */ 2327 if (!ftrace_rec_count(rec)) 2328 rec->flags &= FTRACE_FL_DISABLED; 2329 else 2330 /* 2331 * Just disable the record, but keep the ops TRAMP 2332 * and REGS states. The _EN flags must be disabled though. 
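			 *
			 * For example (illustrative): a record carrying
			 * ENABLED | TRAMP | TRAMP_EN is left with just TRAMP,
			 * i.e. still marked as wanting a trampoline, but no
			 * longer live at the call site.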
2333 */ 2334 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | 2335 FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN | 2336 FTRACE_FL_CALL_OPS_EN); 2337 } 2338 2339 ftrace_bug_type = FTRACE_BUG_NOP; 2340 return FTRACE_UPDATE_MAKE_NOP; 2341 } 2342 2343 /** 2344 * ftrace_update_record - set a record that now is tracing or not 2345 * @rec: the record to update 2346 * @enable: set to true if the record is tracing, false to force disable 2347 * 2348 * The records that represent all functions that can be traced need 2349 * to be updated when tracing has been enabled. 2350 */ 2351 int ftrace_update_record(struct dyn_ftrace *rec, bool enable) 2352 { 2353 return ftrace_check_record(rec, enable, true); 2354 } 2355 2356 /** 2357 * ftrace_test_record - check if the record has been enabled or not 2358 * @rec: the record to test 2359 * @enable: set to true to check if enabled, false if it is disabled 2360 * 2361 * The arch code may need to test if a record is already set to 2362 * tracing to determine how to modify the function code that it 2363 * represents. 2364 */ 2365 int ftrace_test_record(struct dyn_ftrace *rec, bool enable) 2366 { 2367 return ftrace_check_record(rec, enable, false); 2368 } 2369 2370 static struct ftrace_ops * 2371 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec) 2372 { 2373 struct ftrace_ops *op; 2374 unsigned long ip = rec->ip; 2375 2376 do_for_each_ftrace_op(op, ftrace_ops_list) { 2377 2378 if (!op->trampoline) 2379 continue; 2380 2381 if (hash_contains_ip(ip, op->func_hash)) 2382 return op; 2383 } while_for_each_ftrace_op(op); 2384 2385 return NULL; 2386 } 2387 2388 static struct ftrace_ops * 2389 ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude) 2390 { 2391 struct ftrace_ops *op; 2392 unsigned long ip = rec->ip; 2393 2394 do_for_each_ftrace_op(op, ftrace_ops_list) { 2395 2396 if (op == op_exclude || !op->trampoline) 2397 continue; 2398 2399 if (hash_contains_ip(ip, op->func_hash)) 2400 return op; 2401 } while_for_each_ftrace_op(op); 2402 2403 return NULL; 2404 } 2405 2406 static struct ftrace_ops * 2407 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, 2408 struct ftrace_ops *op) 2409 { 2410 unsigned long ip = rec->ip; 2411 2412 while_for_each_ftrace_op(op) { 2413 2414 if (!op->trampoline) 2415 continue; 2416 2417 if (hash_contains_ip(ip, op->func_hash)) 2418 return op; 2419 } 2420 2421 return NULL; 2422 } 2423 2424 static struct ftrace_ops * 2425 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) 2426 { 2427 struct ftrace_ops *op; 2428 unsigned long ip = rec->ip; 2429 2430 /* 2431 * Need to check removed ops first. 2432 * If they are being removed, and this rec has a tramp, 2433 * and this rec is in the ops list, then it would be the 2434 * one with the tramp. 2435 */ 2436 if (removed_ops) { 2437 if (hash_contains_ip(ip, &removed_ops->old_hash)) 2438 return removed_ops; 2439 } 2440 2441 /* 2442 * Need to find the current trampoline for a rec. 2443 * Now, a trampoline is only attached to a rec if there 2444 * was a single 'ops' attached to it. But this can be called 2445 * when we are adding another op to the rec or removing the 2446 * current one. Thus, if the op is being added, we can 2447 * ignore it because it hasn't attached itself to the rec 2448 * yet. 2449 * 2450 * If an ops is being modified (hooking to different functions) 2451 * then we don't care about the new functions that are being 2452 * added, just the old ones (that are probably being removed). 
2453 * 2454 * If we are adding an ops to a function that already is using 2455 * a trampoline, it needs to be removed (trampolines are only 2456 * for single ops connected), then an ops that is not being 2457 * modified also needs to be checked. 2458 */ 2459 do_for_each_ftrace_op(op, ftrace_ops_list) { 2460 2461 if (!op->trampoline) 2462 continue; 2463 2464 /* 2465 * If the ops is being added, it hasn't gotten to 2466 * the point to be removed from this tree yet. 2467 */ 2468 if (op->flags & FTRACE_OPS_FL_ADDING) 2469 continue; 2470 2471 2472 /* 2473 * If the ops is being modified and is in the old 2474 * hash, then it is probably being removed from this 2475 * function. 2476 */ 2477 if ((op->flags & FTRACE_OPS_FL_MODIFYING) && 2478 hash_contains_ip(ip, &op->old_hash)) 2479 return op; 2480 /* 2481 * If the ops is not being added or modified, and it's 2482 * in its normal filter hash, then this must be the one 2483 * we want! 2484 */ 2485 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) && 2486 hash_contains_ip(ip, op->func_hash)) 2487 return op; 2488 2489 } while_for_each_ftrace_op(op); 2490 2491 return NULL; 2492 } 2493 2494 static struct ftrace_ops * 2495 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec) 2496 { 2497 struct ftrace_ops *op; 2498 unsigned long ip = rec->ip; 2499 2500 do_for_each_ftrace_op(op, ftrace_ops_list) { 2501 /* pass rec in as regs to have non-NULL val */ 2502 if (hash_contains_ip(ip, op->func_hash)) 2503 return op; 2504 } while_for_each_ftrace_op(op); 2505 2506 return NULL; 2507 } 2508 2509 struct ftrace_ops * 2510 ftrace_find_unique_ops(struct dyn_ftrace *rec) 2511 { 2512 struct ftrace_ops *op, *found = NULL; 2513 unsigned long ip = rec->ip; 2514 2515 do_for_each_ftrace_op(op, ftrace_ops_list) { 2516 2517 if (hash_contains_ip(ip, op->func_hash)) { 2518 if (found) 2519 return NULL; 2520 found = op; 2521 } 2522 2523 } while_for_each_ftrace_op(op); 2524 2525 return found; 2526 } 2527 2528 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 2529 /* Protected by rcu_tasks for reading, and direct_mutex for writing */ 2530 static struct ftrace_hash *direct_functions = EMPTY_HASH; 2531 static DEFINE_MUTEX(direct_mutex); 2532 int ftrace_direct_func_count; 2533 2534 /* 2535 * Search the direct_functions hash to see if the given instruction pointer 2536 * has a direct caller attached to it. 2537 */ 2538 unsigned long ftrace_find_rec_direct(unsigned long ip) 2539 { 2540 struct ftrace_func_entry *entry; 2541 2542 entry = __ftrace_lookup_ip(direct_functions, ip); 2543 if (!entry) 2544 return 0; 2545 2546 return entry->direct; 2547 } 2548 2549 static struct ftrace_func_entry* 2550 ftrace_add_rec_direct(unsigned long ip, unsigned long addr, 2551 struct ftrace_hash **free_hash) 2552 { 2553 struct ftrace_func_entry *entry; 2554 2555 if (ftrace_hash_empty(direct_functions) || 2556 direct_functions->count > 2 * (1 << direct_functions->size_bits)) { 2557 struct ftrace_hash *new_hash; 2558 int size = ftrace_hash_empty(direct_functions) ? 
0 : 2559 direct_functions->count + 1; 2560 2561 if (size < 32) 2562 size = 32; 2563 2564 new_hash = dup_hash(direct_functions, size); 2565 if (!new_hash) 2566 return NULL; 2567 2568 *free_hash = direct_functions; 2569 direct_functions = new_hash; 2570 } 2571 2572 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 2573 if (!entry) 2574 return NULL; 2575 2576 entry->ip = ip; 2577 entry->direct = addr; 2578 __add_hash_entry(direct_functions, entry); 2579 return entry; 2580 } 2581 2582 static void call_direct_funcs(unsigned long ip, unsigned long pip, 2583 struct ftrace_ops *ops, struct ftrace_regs *fregs) 2584 { 2585 unsigned long addr; 2586 2587 addr = ftrace_find_rec_direct(ip); 2588 if (!addr) 2589 return; 2590 2591 arch_ftrace_set_direct_caller(fregs, addr); 2592 } 2593 2594 struct ftrace_ops direct_ops = { 2595 .func = call_direct_funcs, 2596 .flags = FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS 2597 | FTRACE_OPS_FL_PERMANENT, 2598 /* 2599 * By declaring the main trampoline as this trampoline 2600 * it will never have one allocated for it. Allocated 2601 * trampolines should not call direct functions. 2602 * The direct_ops should only be called by the builtin 2603 * ftrace_regs_caller trampoline. 2604 */ 2605 .trampoline = FTRACE_REGS_ADDR, 2606 }; 2607 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ 2608 2609 /** 2610 * ftrace_get_addr_new - Get the call address to set to 2611 * @rec: The ftrace record descriptor 2612 * 2613 * If the record has the FTRACE_FL_REGS set, that means that it 2614 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS 2615 * is not set, then it wants to convert to the normal callback. 2616 * 2617 * Returns the address of the trampoline to set to 2618 */ 2619 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) 2620 { 2621 struct ftrace_ops *ops; 2622 unsigned long addr; 2623 2624 if ((rec->flags & FTRACE_FL_DIRECT) && 2625 (ftrace_rec_count(rec) == 1)) { 2626 addr = ftrace_find_rec_direct(rec->ip); 2627 if (addr) 2628 return addr; 2629 WARN_ON_ONCE(1); 2630 } 2631 2632 /* Trampolines take precedence over regs */ 2633 if (rec->flags & FTRACE_FL_TRAMP) { 2634 ops = ftrace_find_tramp_ops_new(rec); 2635 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { 2636 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n", 2637 (void *)rec->ip, (void *)rec->ip, rec->flags); 2638 /* Ftrace is shutting down, return anything */ 2639 return (unsigned long)FTRACE_ADDR; 2640 } 2641 return ops->trampoline; 2642 } 2643 2644 if (rec->flags & FTRACE_FL_REGS) 2645 return (unsigned long)FTRACE_REGS_ADDR; 2646 else 2647 return (unsigned long)FTRACE_ADDR; 2648 } 2649 2650 /** 2651 * ftrace_get_addr_curr - Get the call address that is already there 2652 * @rec: The ftrace record descriptor 2653 * 2654 * The FTRACE_FL_REGS_EN is set when the record already points to 2655 * a function that saves all the regs. Basically the '_EN' version 2656 * represents the current state of the function. 
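 * For instance (illustrative), a record with FTRACE_FL_REGS_EN set is
 * currently calling the regs-saving caller (FTRACE_REGS_ADDR), while one
 * with FTRACE_FL_TRAMP_EN set is calling an ops-specific trampoline.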
2657 * 2658 * Returns the address of the trampoline that is currently being called 2659 */ 2660 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) 2661 { 2662 struct ftrace_ops *ops; 2663 unsigned long addr; 2664 2665 /* Direct calls take precedence over trampolines */ 2666 if (rec->flags & FTRACE_FL_DIRECT_EN) { 2667 addr = ftrace_find_rec_direct(rec->ip); 2668 if (addr) 2669 return addr; 2670 WARN_ON_ONCE(1); 2671 } 2672 2673 /* Trampolines take precedence over regs */ 2674 if (rec->flags & FTRACE_FL_TRAMP_EN) { 2675 ops = ftrace_find_tramp_ops_curr(rec); 2676 if (FTRACE_WARN_ON(!ops)) { 2677 pr_warn("Bad trampoline accounting at: %p (%pS)\n", 2678 (void *)rec->ip, (void *)rec->ip); 2679 /* Ftrace is shutting down, return anything */ 2680 return (unsigned long)FTRACE_ADDR; 2681 } 2682 return ops->trampoline; 2683 } 2684 2685 if (rec->flags & FTRACE_FL_REGS_EN) 2686 return (unsigned long)FTRACE_REGS_ADDR; 2687 else 2688 return (unsigned long)FTRACE_ADDR; 2689 } 2690 2691 static int 2692 __ftrace_replace_code(struct dyn_ftrace *rec, bool enable) 2693 { 2694 unsigned long ftrace_old_addr; 2695 unsigned long ftrace_addr; 2696 int ret; 2697 2698 ftrace_addr = ftrace_get_addr_new(rec); 2699 2700 /* This needs to be done before we call ftrace_update_record */ 2701 ftrace_old_addr = ftrace_get_addr_curr(rec); 2702 2703 ret = ftrace_update_record(rec, enable); 2704 2705 ftrace_bug_type = FTRACE_BUG_UNKNOWN; 2706 2707 switch (ret) { 2708 case FTRACE_UPDATE_IGNORE: 2709 return 0; 2710 2711 case FTRACE_UPDATE_MAKE_CALL: 2712 ftrace_bug_type = FTRACE_BUG_CALL; 2713 return ftrace_make_call(rec, ftrace_addr); 2714 2715 case FTRACE_UPDATE_MAKE_NOP: 2716 ftrace_bug_type = FTRACE_BUG_NOP; 2717 return ftrace_make_nop(NULL, rec, ftrace_old_addr); 2718 2719 case FTRACE_UPDATE_MODIFY_CALL: 2720 ftrace_bug_type = FTRACE_BUG_UPDATE; 2721 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); 2722 } 2723 2724 return -1; /* unknown ftrace bug */ 2725 } 2726 2727 void __weak ftrace_replace_code(int mod_flags) 2728 { 2729 struct dyn_ftrace *rec; 2730 struct ftrace_page *pg; 2731 bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL; 2732 int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL; 2733 int failed; 2734 2735 if (unlikely(ftrace_disabled)) 2736 return; 2737 2738 do_for_each_ftrace_rec(pg, rec) { 2739 2740 if (skip_record(rec)) 2741 continue; 2742 2743 failed = __ftrace_replace_code(rec, enable); 2744 if (failed) { 2745 ftrace_bug(failed, rec); 2746 /* Stop processing */ 2747 return; 2748 } 2749 if (schedulable) 2750 cond_resched(); 2751 } while_for_each_ftrace_rec(); 2752 } 2753 2754 struct ftrace_rec_iter { 2755 struct ftrace_page *pg; 2756 int index; 2757 }; 2758 2759 /** 2760 * ftrace_rec_iter_start - start up iterating over traced functions 2761 * 2762 * Returns an iterator handle that is used to iterate over all 2763 * the records that represent address locations where functions 2764 * are traced. 2765 * 2766 * May return NULL if no records are available. 2767 */ 2768 struct ftrace_rec_iter *ftrace_rec_iter_start(void) 2769 { 2770 /* 2771 * We only use a single iterator. 2772 * Protected by the ftrace_lock mutex. 
2773 */ 2774 static struct ftrace_rec_iter ftrace_rec_iter; 2775 struct ftrace_rec_iter *iter = &ftrace_rec_iter; 2776 2777 iter->pg = ftrace_pages_start; 2778 iter->index = 0; 2779 2780 /* Could have empty pages */ 2781 while (iter->pg && !iter->pg->index) 2782 iter->pg = iter->pg->next; 2783 2784 if (!iter->pg) 2785 return NULL; 2786 2787 return iter; 2788 } 2789 2790 /** 2791 * ftrace_rec_iter_next - get the next record to process. 2792 * @iter: The handle to the iterator. 2793 * 2794 * Returns the next iterator after the given iterator @iter. 2795 */ 2796 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter) 2797 { 2798 iter->index++; 2799 2800 if (iter->index >= iter->pg->index) { 2801 iter->pg = iter->pg->next; 2802 iter->index = 0; 2803 2804 /* Could have empty pages */ 2805 while (iter->pg && !iter->pg->index) 2806 iter->pg = iter->pg->next; 2807 } 2808 2809 if (!iter->pg) 2810 return NULL; 2811 2812 return iter; 2813 } 2814 2815 /** 2816 * ftrace_rec_iter_record - get the record at the iterator location 2817 * @iter: The current iterator location 2818 * 2819 * Returns the record that the current @iter is at. 2820 */ 2821 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter) 2822 { 2823 return &iter->pg->records[iter->index]; 2824 } 2825 2826 static int 2827 ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec) 2828 { 2829 int ret; 2830 2831 if (unlikely(ftrace_disabled)) 2832 return 0; 2833 2834 ret = ftrace_init_nop(mod, rec); 2835 if (ret) { 2836 ftrace_bug_type = FTRACE_BUG_INIT; 2837 ftrace_bug(ret, rec); 2838 return 0; 2839 } 2840 return 1; 2841 } 2842 2843 /* 2844 * archs can override this function if they must do something 2845 * before the modifying code is performed. 2846 */ 2847 void __weak ftrace_arch_code_modify_prepare(void) 2848 { 2849 } 2850 2851 /* 2852 * archs can override this function if they must do something 2853 * after the modifying code is performed. 2854 */ 2855 void __weak ftrace_arch_code_modify_post_process(void) 2856 { 2857 } 2858 2859 static int update_ftrace_func(ftrace_func_t func) 2860 { 2861 static ftrace_func_t save_func; 2862 2863 /* Avoid updating if it hasn't changed */ 2864 if (func == save_func) 2865 return 0; 2866 2867 save_func = func; 2868 2869 return ftrace_update_ftrace_func(func); 2870 } 2871 2872 void ftrace_modify_all_code(int command) 2873 { 2874 int update = command & FTRACE_UPDATE_TRACE_FUNC; 2875 int mod_flags = 0; 2876 int err = 0; 2877 2878 if (command & FTRACE_MAY_SLEEP) 2879 mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL; 2880 2881 /* 2882 * If the ftrace_caller calls a ftrace_ops func directly, 2883 * we need to make sure that it only traces functions it 2884 * expects to trace. When doing the switch of functions, 2885 * we need to update to the ftrace_ops_list_func first 2886 * before the transition between old and new calls are set, 2887 * as the ftrace_ops_list_func will check the ops hashes 2888 * to make sure the ops are having the right functions 2889 * traced. 
2890 */ 2891 if (update) { 2892 err = update_ftrace_func(ftrace_ops_list_func); 2893 if (FTRACE_WARN_ON(err)) 2894 return; 2895 } 2896 2897 if (command & FTRACE_UPDATE_CALLS) 2898 ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL); 2899 else if (command & FTRACE_DISABLE_CALLS) 2900 ftrace_replace_code(mod_flags); 2901 2902 if (update && ftrace_trace_function != ftrace_ops_list_func) { 2903 function_trace_op = set_function_trace_op; 2904 smp_wmb(); 2905 /* If irqs are disabled, we are in stop machine */ 2906 if (!irqs_disabled()) 2907 smp_call_function(ftrace_sync_ipi, NULL, 1); 2908 err = update_ftrace_func(ftrace_trace_function); 2909 if (FTRACE_WARN_ON(err)) 2910 return; 2911 } 2912 2913 if (command & FTRACE_START_FUNC_RET) 2914 err = ftrace_enable_ftrace_graph_caller(); 2915 else if (command & FTRACE_STOP_FUNC_RET) 2916 err = ftrace_disable_ftrace_graph_caller(); 2917 FTRACE_WARN_ON(err); 2918 } 2919 2920 static int __ftrace_modify_code(void *data) 2921 { 2922 int *command = data; 2923 2924 ftrace_modify_all_code(*command); 2925 2926 return 0; 2927 } 2928 2929 /** 2930 * ftrace_run_stop_machine - go back to the stop machine method 2931 * @command: The command to tell ftrace what to do 2932 * 2933 * If an arch needs to fall back to the stop machine method, the 2934 * it can call this function. 2935 */ 2936 void ftrace_run_stop_machine(int command) 2937 { 2938 stop_machine(__ftrace_modify_code, &command, NULL); 2939 } 2940 2941 /** 2942 * arch_ftrace_update_code - modify the code to trace or not trace 2943 * @command: The command that needs to be done 2944 * 2945 * Archs can override this function if it does not need to 2946 * run stop_machine() to modify code. 2947 */ 2948 void __weak arch_ftrace_update_code(int command) 2949 { 2950 ftrace_run_stop_machine(command); 2951 } 2952 2953 static void ftrace_run_update_code(int command) 2954 { 2955 ftrace_arch_code_modify_prepare(); 2956 2957 /* 2958 * By default we use stop_machine() to modify the code. 2959 * But archs can do what ever they want as long as it 2960 * is safe. The stop_machine() is the safest, but also 2961 * produces the most overhead. 
2962 */ 2963 arch_ftrace_update_code(command); 2964 2965 ftrace_arch_code_modify_post_process(); 2966 } 2967 2968 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, 2969 struct ftrace_ops_hash *old_hash) 2970 { 2971 ops->flags |= FTRACE_OPS_FL_MODIFYING; 2972 ops->old_hash.filter_hash = old_hash->filter_hash; 2973 ops->old_hash.notrace_hash = old_hash->notrace_hash; 2974 ftrace_run_update_code(command); 2975 ops->old_hash.filter_hash = NULL; 2976 ops->old_hash.notrace_hash = NULL; 2977 ops->flags &= ~FTRACE_OPS_FL_MODIFYING; 2978 } 2979 2980 static ftrace_func_t saved_ftrace_func; 2981 static int ftrace_start_up; 2982 2983 void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops) 2984 { 2985 } 2986 2987 /* List of trace_ops that have allocated trampolines */ 2988 static LIST_HEAD(ftrace_ops_trampoline_list); 2989 2990 static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops) 2991 { 2992 lockdep_assert_held(&ftrace_lock); 2993 list_add_rcu(&ops->list, &ftrace_ops_trampoline_list); 2994 } 2995 2996 static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops) 2997 { 2998 lockdep_assert_held(&ftrace_lock); 2999 list_del_rcu(&ops->list); 3000 synchronize_rcu(); 3001 } 3002 3003 /* 3004 * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols 3005 * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is 3006 * not a module. 3007 */ 3008 #define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace" 3009 #define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline" 3010 3011 static void ftrace_trampoline_free(struct ftrace_ops *ops) 3012 { 3013 if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) && 3014 ops->trampoline) { 3015 /* 3016 * Record the text poke event before the ksymbol unregister 3017 * event. 3018 */ 3019 perf_event_text_poke((void *)ops->trampoline, 3020 (void *)ops->trampoline, 3021 ops->trampoline_size, NULL, 0); 3022 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, 3023 ops->trampoline, ops->trampoline_size, 3024 true, FTRACE_TRAMPOLINE_SYM); 3025 /* Remove from kallsyms after the perf events */ 3026 ftrace_remove_trampoline_from_kallsyms(ops); 3027 } 3028 3029 arch_ftrace_trampoline_free(ops); 3030 } 3031 3032 static void ftrace_startup_enable(int command) 3033 { 3034 if (saved_ftrace_func != ftrace_trace_function) { 3035 saved_ftrace_func = ftrace_trace_function; 3036 command |= FTRACE_UPDATE_TRACE_FUNC; 3037 } 3038 3039 if (!command || !ftrace_enabled) 3040 return; 3041 3042 ftrace_run_update_code(command); 3043 } 3044 3045 static void ftrace_startup_all(int command) 3046 { 3047 update_all_ops = true; 3048 ftrace_startup_enable(command); 3049 update_all_ops = false; 3050 } 3051 3052 int ftrace_startup(struct ftrace_ops *ops, int command) 3053 { 3054 int ret; 3055 3056 if (unlikely(ftrace_disabled)) 3057 return -ENODEV; 3058 3059 ret = __register_ftrace_function(ops); 3060 if (ret) 3061 return ret; 3062 3063 ftrace_start_up++; 3064 3065 /* 3066 * Note that ftrace probes uses this to start up 3067 * and modify functions it will probe. But we still 3068 * set the ADDING flag for modification, as probes 3069 * do not have trampolines. If they add them in the 3070 * future, then the probes will need to distinguish 3071 * between adding and updating probes. 
3072 */ 3073 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING; 3074 3075 ret = ftrace_hash_ipmodify_enable(ops); 3076 if (ret < 0) { 3077 /* Rollback registration process */ 3078 __unregister_ftrace_function(ops); 3079 ftrace_start_up--; 3080 ops->flags &= ~FTRACE_OPS_FL_ENABLED; 3081 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) 3082 ftrace_trampoline_free(ops); 3083 return ret; 3084 } 3085 3086 if (ftrace_hash_rec_enable(ops, 1)) 3087 command |= FTRACE_UPDATE_CALLS; 3088 3089 ftrace_startup_enable(command); 3090 3091 /* 3092 * If ftrace is in an undefined state, we just remove ops from list 3093 * to prevent the NULL pointer, instead of totally rolling it back and 3094 * free trampoline, because those actions could cause further damage. 3095 */ 3096 if (unlikely(ftrace_disabled)) { 3097 __unregister_ftrace_function(ops); 3098 return -ENODEV; 3099 } 3100 3101 ops->flags &= ~FTRACE_OPS_FL_ADDING; 3102 3103 return 0; 3104 } 3105 3106 int ftrace_shutdown(struct ftrace_ops *ops, int command) 3107 { 3108 int ret; 3109 3110 if (unlikely(ftrace_disabled)) 3111 return -ENODEV; 3112 3113 ret = __unregister_ftrace_function(ops); 3114 if (ret) 3115 return ret; 3116 3117 ftrace_start_up--; 3118 /* 3119 * Just warn in case of unbalance, no need to kill ftrace, it's not 3120 * critical but the ftrace_call callers may be never nopped again after 3121 * further ftrace uses. 3122 */ 3123 WARN_ON_ONCE(ftrace_start_up < 0); 3124 3125 /* Disabling ipmodify never fails */ 3126 ftrace_hash_ipmodify_disable(ops); 3127 3128 if (ftrace_hash_rec_disable(ops, 1)) 3129 command |= FTRACE_UPDATE_CALLS; 3130 3131 ops->flags &= ~FTRACE_OPS_FL_ENABLED; 3132 3133 if (saved_ftrace_func != ftrace_trace_function) { 3134 saved_ftrace_func = ftrace_trace_function; 3135 command |= FTRACE_UPDATE_TRACE_FUNC; 3136 } 3137 3138 if (!command || !ftrace_enabled) 3139 goto out; 3140 3141 /* 3142 * If the ops uses a trampoline, then it needs to be 3143 * tested first on update. 3144 */ 3145 ops->flags |= FTRACE_OPS_FL_REMOVING; 3146 removed_ops = ops; 3147 3148 /* The trampoline logic checks the old hashes */ 3149 ops->old_hash.filter_hash = ops->func_hash->filter_hash; 3150 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash; 3151 3152 ftrace_run_update_code(command); 3153 3154 /* 3155 * If there's no more ops registered with ftrace, run a 3156 * sanity check to make sure all rec flags are cleared. 3157 */ 3158 if (rcu_dereference_protected(ftrace_ops_list, 3159 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { 3160 struct ftrace_page *pg; 3161 struct dyn_ftrace *rec; 3162 3163 do_for_each_ftrace_rec(pg, rec) { 3164 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED)) 3165 pr_warn(" %pS flags:%lx\n", 3166 (void *)rec->ip, rec->flags); 3167 } while_for_each_ftrace_rec(); 3168 } 3169 3170 ops->old_hash.filter_hash = NULL; 3171 ops->old_hash.notrace_hash = NULL; 3172 3173 removed_ops = NULL; 3174 ops->flags &= ~FTRACE_OPS_FL_REMOVING; 3175 3176 out: 3177 /* 3178 * Dynamic ops may be freed, we must make sure that all 3179 * callers are done before leaving this function. 3180 */ 3181 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) { 3182 /* 3183 * We need to do a hard force of sched synchronization. 3184 * This is because we use preempt_disable() to do RCU, but 3185 * the function tracers can be called where RCU is not watching 3186 * (like before user_exit()). We can not rely on the RCU 3187 * infrastructure to do the synchronization, thus we must do it 3188 * ourselves. 
3189 */ 3190 synchronize_rcu_tasks_rude(); 3191 3192 /* 3193 * When the kernel is preemptive, tasks can be preempted 3194 * while on a ftrace trampoline. Just scheduling a task on 3195 * a CPU is not good enough to flush them. Calling 3196 * synchronize_rcu_tasks() will wait for those tasks to 3197 * execute and either schedule voluntarily or enter user space. 3198 */ 3199 if (IS_ENABLED(CONFIG_PREEMPTION)) 3200 synchronize_rcu_tasks(); 3201 3202 ftrace_trampoline_free(ops); 3203 } 3204 3205 return 0; 3206 } 3207 3208 static u64 ftrace_update_time; 3209 unsigned long ftrace_update_tot_cnt; 3210 unsigned long ftrace_number_of_pages; 3211 unsigned long ftrace_number_of_groups; 3212 3213 static inline int ops_traces_mod(struct ftrace_ops *ops) 3214 { 3215 /* 3216 * Filter_hash being empty will default to trace module. 3217 * But notrace hash requires a test of individual module functions. 3218 */ 3219 return ftrace_hash_empty(ops->func_hash->filter_hash) && 3220 ftrace_hash_empty(ops->func_hash->notrace_hash); 3221 } 3222 3223 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) 3224 { 3225 bool init_nop = ftrace_need_init_nop(); 3226 struct ftrace_page *pg; 3227 struct dyn_ftrace *p; 3228 u64 start, stop; 3229 unsigned long update_cnt = 0; 3230 unsigned long rec_flags = 0; 3231 int i; 3232 3233 start = ftrace_now(raw_smp_processor_id()); 3234 3235 /* 3236 * When a module is loaded, this function is called to convert 3237 * the calls to mcount in its text to nops, and also to create 3238 * an entry in the ftrace data. Now, if ftrace is activated 3239 * after this call, but before the module sets its text to 3240 * read-only, the modification of enabling ftrace can fail if 3241 * the read-only is done while ftrace is converting the calls. 3242 * To prevent this, the module's records are set as disabled 3243 * and will be enabled after the call to set the module's text 3244 * to read-only. 3245 */ 3246 if (mod) 3247 rec_flags |= FTRACE_FL_DISABLED; 3248 3249 for (pg = new_pgs; pg; pg = pg->next) { 3250 3251 for (i = 0; i < pg->index; i++) { 3252 3253 /* If something went wrong, bail without enabling anything */ 3254 if (unlikely(ftrace_disabled)) 3255 return -1; 3256 3257 p = &pg->records[i]; 3258 p->flags = rec_flags; 3259 3260 /* 3261 * Do the initial record conversion from mcount jump 3262 * to the NOP instructions. 
3263 */ 3264 if (init_nop && !ftrace_nop_initialize(mod, p)) 3265 break; 3266 3267 update_cnt++; 3268 } 3269 } 3270 3271 stop = ftrace_now(raw_smp_processor_id()); 3272 ftrace_update_time = stop - start; 3273 ftrace_update_tot_cnt += update_cnt; 3274 3275 return 0; 3276 } 3277 3278 static int ftrace_allocate_records(struct ftrace_page *pg, int count) 3279 { 3280 int order; 3281 int pages; 3282 int cnt; 3283 3284 if (WARN_ON(!count)) 3285 return -EINVAL; 3286 3287 /* We want to fill as much as possible, with no empty pages */ 3288 pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE); 3289 order = fls(pages) - 1; 3290 3291 again: 3292 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); 3293 3294 if (!pg->records) { 3295 /* if we can't allocate this size, try something smaller */ 3296 if (!order) 3297 return -ENOMEM; 3298 order--; 3299 goto again; 3300 } 3301 3302 ftrace_number_of_pages += 1 << order; 3303 ftrace_number_of_groups++; 3304 3305 cnt = (PAGE_SIZE << order) / ENTRY_SIZE; 3306 pg->order = order; 3307 3308 if (cnt > count) 3309 cnt = count; 3310 3311 return cnt; 3312 } 3313 3314 static struct ftrace_page * 3315 ftrace_allocate_pages(unsigned long num_to_init) 3316 { 3317 struct ftrace_page *start_pg; 3318 struct ftrace_page *pg; 3319 int cnt; 3320 3321 if (!num_to_init) 3322 return NULL; 3323 3324 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); 3325 if (!pg) 3326 return NULL; 3327 3328 /* 3329 * Try to allocate as much as possible in one continues 3330 * location that fills in all of the space. We want to 3331 * waste as little space as possible. 3332 */ 3333 for (;;) { 3334 cnt = ftrace_allocate_records(pg, num_to_init); 3335 if (cnt < 0) 3336 goto free_pages; 3337 3338 num_to_init -= cnt; 3339 if (!num_to_init) 3340 break; 3341 3342 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); 3343 if (!pg->next) 3344 goto free_pages; 3345 3346 pg = pg->next; 3347 } 3348 3349 return start_pg; 3350 3351 free_pages: 3352 pg = start_pg; 3353 while (pg) { 3354 if (pg->records) { 3355 free_pages((unsigned long)pg->records, pg->order); 3356 ftrace_number_of_pages -= 1 << pg->order; 3357 } 3358 start_pg = pg->next; 3359 kfree(pg); 3360 pg = start_pg; 3361 ftrace_number_of_groups--; 3362 } 3363 pr_info("ftrace: FAILED to allocate memory for functions\n"); 3364 return NULL; 3365 } 3366 3367 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ 3368 3369 struct ftrace_iterator { 3370 loff_t pos; 3371 loff_t func_pos; 3372 loff_t mod_pos; 3373 struct ftrace_page *pg; 3374 struct dyn_ftrace *func; 3375 struct ftrace_func_probe *probe; 3376 struct ftrace_func_entry *probe_entry; 3377 struct trace_parser parser; 3378 struct ftrace_hash *hash; 3379 struct ftrace_ops *ops; 3380 struct trace_array *tr; 3381 struct list_head *mod_list; 3382 int pidx; 3383 int idx; 3384 unsigned flags; 3385 }; 3386 3387 static void * 3388 t_probe_next(struct seq_file *m, loff_t *pos) 3389 { 3390 struct ftrace_iterator *iter = m->private; 3391 struct trace_array *tr = iter->ops->private; 3392 struct list_head *func_probes; 3393 struct ftrace_hash *hash; 3394 struct list_head *next; 3395 struct hlist_node *hnd = NULL; 3396 struct hlist_head *hhd; 3397 int size; 3398 3399 (*pos)++; 3400 iter->pos = *pos; 3401 3402 if (!tr) 3403 return NULL; 3404 3405 func_probes = &tr->func_probes; 3406 if (list_empty(func_probes)) 3407 return NULL; 3408 3409 if (!iter->probe) { 3410 next = func_probes->next; 3411 iter->probe = list_entry(next, struct ftrace_func_probe, list); 3412 } 3413 3414 if (iter->probe_entry) 3415 
hnd = &iter->probe_entry->hlist; 3416 3417 hash = iter->probe->ops.func_hash->filter_hash; 3418 3419 /* 3420 * A probe being registered may temporarily have an empty hash 3421 * and it's at the end of the func_probes list. 3422 */ 3423 if (!hash || hash == EMPTY_HASH) 3424 return NULL; 3425 3426 size = 1 << hash->size_bits; 3427 3428 retry: 3429 if (iter->pidx >= size) { 3430 if (iter->probe->list.next == func_probes) 3431 return NULL; 3432 next = iter->probe->list.next; 3433 iter->probe = list_entry(next, struct ftrace_func_probe, list); 3434 hash = iter->probe->ops.func_hash->filter_hash; 3435 size = 1 << hash->size_bits; 3436 iter->pidx = 0; 3437 } 3438 3439 hhd = &hash->buckets[iter->pidx]; 3440 3441 if (hlist_empty(hhd)) { 3442 iter->pidx++; 3443 hnd = NULL; 3444 goto retry; 3445 } 3446 3447 if (!hnd) 3448 hnd = hhd->first; 3449 else { 3450 hnd = hnd->next; 3451 if (!hnd) { 3452 iter->pidx++; 3453 goto retry; 3454 } 3455 } 3456 3457 if (WARN_ON_ONCE(!hnd)) 3458 return NULL; 3459 3460 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist); 3461 3462 return iter; 3463 } 3464 3465 static void *t_probe_start(struct seq_file *m, loff_t *pos) 3466 { 3467 struct ftrace_iterator *iter = m->private; 3468 void *p = NULL; 3469 loff_t l; 3470 3471 if (!(iter->flags & FTRACE_ITER_DO_PROBES)) 3472 return NULL; 3473 3474 if (iter->mod_pos > *pos) 3475 return NULL; 3476 3477 iter->probe = NULL; 3478 iter->probe_entry = NULL; 3479 iter->pidx = 0; 3480 for (l = 0; l <= (*pos - iter->mod_pos); ) { 3481 p = t_probe_next(m, &l); 3482 if (!p) 3483 break; 3484 } 3485 if (!p) 3486 return NULL; 3487 3488 /* Only set this if we have an item */ 3489 iter->flags |= FTRACE_ITER_PROBE; 3490 3491 return iter; 3492 } 3493 3494 static int 3495 t_probe_show(struct seq_file *m, struct ftrace_iterator *iter) 3496 { 3497 struct ftrace_func_entry *probe_entry; 3498 struct ftrace_probe_ops *probe_ops; 3499 struct ftrace_func_probe *probe; 3500 3501 probe = iter->probe; 3502 probe_entry = iter->probe_entry; 3503 3504 if (WARN_ON_ONCE(!probe || !probe_entry)) 3505 return -EIO; 3506 3507 probe_ops = probe->probe_ops; 3508 3509 if (probe_ops->print) 3510 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data); 3511 3512 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, 3513 (void *)probe_ops->func); 3514 3515 return 0; 3516 } 3517 3518 static void * 3519 t_mod_next(struct seq_file *m, loff_t *pos) 3520 { 3521 struct ftrace_iterator *iter = m->private; 3522 struct trace_array *tr = iter->tr; 3523 3524 (*pos)++; 3525 iter->pos = *pos; 3526 3527 iter->mod_list = iter->mod_list->next; 3528 3529 if (iter->mod_list == &tr->mod_trace || 3530 iter->mod_list == &tr->mod_notrace) { 3531 iter->flags &= ~FTRACE_ITER_MOD; 3532 return NULL; 3533 } 3534 3535 iter->mod_pos = *pos; 3536 3537 return iter; 3538 } 3539 3540 static void *t_mod_start(struct seq_file *m, loff_t *pos) 3541 { 3542 struct ftrace_iterator *iter = m->private; 3543 void *p = NULL; 3544 loff_t l; 3545 3546 if (iter->func_pos > *pos) 3547 return NULL; 3548 3549 iter->mod_pos = iter->func_pos; 3550 3551 /* probes are only available if tr is set */ 3552 if (!iter->tr) 3553 return NULL; 3554 3555 for (l = 0; l <= (*pos - iter->func_pos); ) { 3556 p = t_mod_next(m, &l); 3557 if (!p) 3558 break; 3559 } 3560 if (!p) { 3561 iter->flags &= ~FTRACE_ITER_MOD; 3562 return t_probe_start(m, pos); 3563 } 3564 3565 /* Only set this if we have an item */ 3566 iter->flags |= FTRACE_ITER_MOD; 3567 3568 return iter; 3569 } 3570 3571 static int 3572 
t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
{
	struct ftrace_mod_load *ftrace_mod;
	struct trace_array *tr = iter->tr;

	if (WARN_ON_ONCE(!iter->mod_list) ||
	    iter->mod_list == &tr->mod_trace ||
	    iter->mod_list == &tr->mod_notrace)
		return -EIO;

	ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);

	if (ftrace_mod->func)
		seq_printf(m, "%s", ftrace_mod->func);
	else
		seq_putc(m, '*');

	seq_printf(m, ":mod:%s\n", ftrace_mod->module);

	return 0;
}

static void *
t_func_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
		     !ftrace_lookup_ip(iter->hash, rec->ip)) ||

		    ((iter->flags & FTRACE_ITER_ENABLED) &&
		     !(rec->flags & FTRACE_FL_ENABLED))) {

			rec = NULL;
			goto retry;
		}
	}

	if (!rec)
		return NULL;

	iter->pos = iter->func_pos = *pos;
	iter->func = rec;

	return iter;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	loff_t l = *pos; /* t_probe_start() must use original pos */
	void *ret;

	if (unlikely(ftrace_disabled))
		return NULL;

	if (iter->flags & FTRACE_ITER_PROBE)
		return t_probe_next(m, pos);

	if (iter->flags & FTRACE_ITER_MOD)
		return t_mod_next(m, pos);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		/* next must increment pos, and t_probe_start does not */
		(*pos)++;
		return t_mod_start(m, &l);
	}

	ret = t_func_next(m, pos);

	if (!ret)
		return t_mod_start(m, &l);

	return ret;
}

static void reset_iter_read(struct ftrace_iterator *iter)
{
	iter->pos = 0;
	iter->func_pos = 0;
	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		return NULL;

	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
	    ftrace_hash_empty(iter->hash)) {
		iter->func_pos = 1; /* Account for the message */
		if (*pos > 0)
			return t_mod_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_PROBE;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_MOD)
		return t_mod_start(m, pos);

	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_lock. This is because
	 * those pointers can change without the lock.
3708 */ 3709 iter->pg = ftrace_pages_start; 3710 iter->idx = 0; 3711 for (l = 0; l <= *pos; ) { 3712 p = t_func_next(m, &l); 3713 if (!p) 3714 break; 3715 } 3716 3717 if (!p) 3718 return t_mod_start(m, pos); 3719 3720 return iter; 3721 } 3722 3723 static void t_stop(struct seq_file *m, void *p) 3724 { 3725 mutex_unlock(&ftrace_lock); 3726 } 3727 3728 void * __weak 3729 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) 3730 { 3731 return NULL; 3732 } 3733 3734 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops, 3735 struct dyn_ftrace *rec) 3736 { 3737 void *ptr; 3738 3739 ptr = arch_ftrace_trampoline_func(ops, rec); 3740 if (ptr) 3741 seq_printf(m, " ->%pS", ptr); 3742 } 3743 3744 #ifdef FTRACE_MCOUNT_MAX_OFFSET 3745 /* 3746 * Weak functions can still have an mcount/fentry that is saved in 3747 * the __mcount_loc section. These can be detected by having a 3748 * symbol offset of greater than FTRACE_MCOUNT_MAX_OFFSET, as the 3749 * symbol found by kallsyms is not the function that the mcount/fentry 3750 * is part of. The offset is much greater in these cases. 3751 * 3752 * Test the record to make sure that the ip points to a valid kallsyms 3753 * and if not, mark it disabled. 3754 */ 3755 static int test_for_valid_rec(struct dyn_ftrace *rec) 3756 { 3757 char str[KSYM_SYMBOL_LEN]; 3758 unsigned long offset; 3759 const char *ret; 3760 3761 ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str); 3762 3763 /* Weak functions can cause invalid addresses */ 3764 if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) { 3765 rec->flags |= FTRACE_FL_DISABLED; 3766 return 0; 3767 } 3768 return 1; 3769 } 3770 3771 static struct workqueue_struct *ftrace_check_wq __initdata; 3772 static struct work_struct ftrace_check_work __initdata; 3773 3774 /* 3775 * Scan all the mcount/fentry entries to make sure they are valid. 3776 */ 3777 static __init void ftrace_check_work_func(struct work_struct *work) 3778 { 3779 struct ftrace_page *pg; 3780 struct dyn_ftrace *rec; 3781 3782 mutex_lock(&ftrace_lock); 3783 do_for_each_ftrace_rec(pg, rec) { 3784 test_for_valid_rec(rec); 3785 } while_for_each_ftrace_rec(); 3786 mutex_unlock(&ftrace_lock); 3787 } 3788 3789 static int __init ftrace_check_for_weak_functions(void) 3790 { 3791 INIT_WORK(&ftrace_check_work, ftrace_check_work_func); 3792 3793 ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0); 3794 3795 queue_work(ftrace_check_wq, &ftrace_check_work); 3796 return 0; 3797 } 3798 3799 static int __init ftrace_check_sync(void) 3800 { 3801 /* Make sure the ftrace_check updates are finished */ 3802 if (ftrace_check_wq) 3803 destroy_workqueue(ftrace_check_wq); 3804 return 0; 3805 } 3806 3807 late_initcall_sync(ftrace_check_sync); 3808 subsys_initcall(ftrace_check_for_weak_functions); 3809 3810 static int print_rec(struct seq_file *m, unsigned long ip) 3811 { 3812 unsigned long offset; 3813 char str[KSYM_SYMBOL_LEN]; 3814 char *modname; 3815 const char *ret; 3816 3817 ret = kallsyms_lookup(ip, NULL, &offset, &modname, str); 3818 /* Weak functions can cause invalid addresses */ 3819 if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) { 3820 snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld", 3821 FTRACE_INVALID_FUNCTION, offset); 3822 ret = NULL; 3823 } 3824 3825 seq_puts(m, str); 3826 if (modname) 3827 seq_printf(m, " [%s]", modname); 3828 return ret == NULL ? 
-1 : 0; 3829 } 3830 #else 3831 static inline int test_for_valid_rec(struct dyn_ftrace *rec) 3832 { 3833 return 1; 3834 } 3835 3836 static inline int print_rec(struct seq_file *m, unsigned long ip) 3837 { 3838 seq_printf(m, "%ps", (void *)ip); 3839 return 0; 3840 } 3841 #endif 3842 3843 static int t_show(struct seq_file *m, void *v) 3844 { 3845 struct ftrace_iterator *iter = m->private; 3846 struct dyn_ftrace *rec; 3847 3848 if (iter->flags & FTRACE_ITER_PROBE) 3849 return t_probe_show(m, iter); 3850 3851 if (iter->flags & FTRACE_ITER_MOD) 3852 return t_mod_show(m, iter); 3853 3854 if (iter->flags & FTRACE_ITER_PRINTALL) { 3855 if (iter->flags & FTRACE_ITER_NOTRACE) 3856 seq_puts(m, "#### no functions disabled ####\n"); 3857 else 3858 seq_puts(m, "#### all functions enabled ####\n"); 3859 return 0; 3860 } 3861 3862 rec = iter->func; 3863 3864 if (!rec) 3865 return 0; 3866 3867 if (print_rec(m, rec->ip)) { 3868 /* This should only happen when a rec is disabled */ 3869 WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED)); 3870 seq_putc(m, '\n'); 3871 return 0; 3872 } 3873 3874 if (iter->flags & FTRACE_ITER_ENABLED) { 3875 struct ftrace_ops *ops; 3876 3877 seq_printf(m, " (%ld)%s%s%s%s", 3878 ftrace_rec_count(rec), 3879 rec->flags & FTRACE_FL_REGS ? " R" : " ", 3880 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ", 3881 rec->flags & FTRACE_FL_DIRECT ? " D" : " ", 3882 rec->flags & FTRACE_FL_CALL_OPS ? " O" : " "); 3883 if (rec->flags & FTRACE_FL_TRAMP_EN) { 3884 ops = ftrace_find_tramp_ops_any(rec); 3885 if (ops) { 3886 do { 3887 seq_printf(m, "\ttramp: %pS (%pS)", 3888 (void *)ops->trampoline, 3889 (void *)ops->func); 3890 add_trampoline_func(m, ops, rec); 3891 ops = ftrace_find_tramp_ops_next(rec, ops); 3892 } while (ops); 3893 } else 3894 seq_puts(m, "\ttramp: ERROR!"); 3895 } else { 3896 add_trampoline_func(m, NULL, rec); 3897 } 3898 if (rec->flags & FTRACE_FL_CALL_OPS_EN) { 3899 ops = ftrace_find_unique_ops(rec); 3900 if (ops) { 3901 seq_printf(m, "\tops: %pS (%pS)", 3902 ops, ops->func); 3903 } else { 3904 seq_puts(m, "\tops: ERROR!"); 3905 } 3906 } 3907 if (rec->flags & FTRACE_FL_DIRECT) { 3908 unsigned long direct; 3909 3910 direct = ftrace_find_rec_direct(rec->ip); 3911 if (direct) 3912 seq_printf(m, "\n\tdirect-->%pS", (void *)direct); 3913 } 3914 } 3915 3916 seq_putc(m, '\n'); 3917 3918 return 0; 3919 } 3920 3921 static const struct seq_operations show_ftrace_seq_ops = { 3922 .start = t_start, 3923 .next = t_next, 3924 .stop = t_stop, 3925 .show = t_show, 3926 }; 3927 3928 static int 3929 ftrace_avail_open(struct inode *inode, struct file *file) 3930 { 3931 struct ftrace_iterator *iter; 3932 int ret; 3933 3934 ret = security_locked_down(LOCKDOWN_TRACEFS); 3935 if (ret) 3936 return ret; 3937 3938 if (unlikely(ftrace_disabled)) 3939 return -ENODEV; 3940 3941 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 3942 if (!iter) 3943 return -ENOMEM; 3944 3945 iter->pg = ftrace_pages_start; 3946 iter->ops = &global_ops; 3947 3948 return 0; 3949 } 3950 3951 static int 3952 ftrace_enabled_open(struct inode *inode, struct file *file) 3953 { 3954 struct ftrace_iterator *iter; 3955 3956 /* 3957 * This shows us what functions are currently being 3958 * traced and by what. Not sure if we want lockdown 3959 * to hide such critical information for an admin. 3960 * Although, perhaps it can show information we don't 3961 * want people to see, but if something is tracing 3962 * something, we probably want to know about it. 
3963 */ 3964 3965 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 3966 if (!iter) 3967 return -ENOMEM; 3968 3969 iter->pg = ftrace_pages_start; 3970 iter->flags = FTRACE_ITER_ENABLED; 3971 iter->ops = &global_ops; 3972 3973 return 0; 3974 } 3975 3976 /** 3977 * ftrace_regex_open - initialize function tracer filter files 3978 * @ops: The ftrace_ops that hold the hash filters 3979 * @flag: The type of filter to process 3980 * @inode: The inode, usually passed in to your open routine 3981 * @file: The file, usually passed in to your open routine 3982 * 3983 * ftrace_regex_open() initializes the filter files for the 3984 * @ops. Depending on @flag it may process the filter hash or 3985 * the notrace hash of @ops. With this called from the open 3986 * routine, you can use ftrace_filter_write() for the write 3987 * routine if @flag has FTRACE_ITER_FILTER set, or 3988 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. 3989 * tracing_lseek() should be used as the lseek routine, and 3990 * release must call ftrace_regex_release(). 3991 */ 3992 int 3993 ftrace_regex_open(struct ftrace_ops *ops, int flag, 3994 struct inode *inode, struct file *file) 3995 { 3996 struct ftrace_iterator *iter; 3997 struct ftrace_hash *hash; 3998 struct list_head *mod_head; 3999 struct trace_array *tr = ops->private; 4000 int ret = -ENOMEM; 4001 4002 ftrace_ops_init(ops); 4003 4004 if (unlikely(ftrace_disabled)) 4005 return -ENODEV; 4006 4007 if (tracing_check_open_get_tr(tr)) 4008 return -ENODEV; 4009 4010 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 4011 if (!iter) 4012 goto out; 4013 4014 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) 4015 goto out; 4016 4017 iter->ops = ops; 4018 iter->flags = flag; 4019 iter->tr = tr; 4020 4021 mutex_lock(&ops->func_hash->regex_lock); 4022 4023 if (flag & FTRACE_ITER_NOTRACE) { 4024 hash = ops->func_hash->notrace_hash; 4025 mod_head = tr ? &tr->mod_notrace : NULL; 4026 } else { 4027 hash = ops->func_hash->filter_hash; 4028 mod_head = tr ? 
&tr->mod_trace : NULL; 4029 } 4030 4031 iter->mod_list = mod_head; 4032 4033 if (file->f_mode & FMODE_WRITE) { 4034 const int size_bits = FTRACE_HASH_DEFAULT_BITS; 4035 4036 if (file->f_flags & O_TRUNC) { 4037 iter->hash = alloc_ftrace_hash(size_bits); 4038 clear_ftrace_mod_list(mod_head); 4039 } else { 4040 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); 4041 } 4042 4043 if (!iter->hash) { 4044 trace_parser_put(&iter->parser); 4045 goto out_unlock; 4046 } 4047 } else 4048 iter->hash = hash; 4049 4050 ret = 0; 4051 4052 if (file->f_mode & FMODE_READ) { 4053 iter->pg = ftrace_pages_start; 4054 4055 ret = seq_open(file, &show_ftrace_seq_ops); 4056 if (!ret) { 4057 struct seq_file *m = file->private_data; 4058 m->private = iter; 4059 } else { 4060 /* Failed */ 4061 free_ftrace_hash(iter->hash); 4062 trace_parser_put(&iter->parser); 4063 } 4064 } else 4065 file->private_data = iter; 4066 4067 out_unlock: 4068 mutex_unlock(&ops->func_hash->regex_lock); 4069 4070 out: 4071 if (ret) { 4072 kfree(iter); 4073 if (tr) 4074 trace_array_put(tr); 4075 } 4076 4077 return ret; 4078 } 4079 4080 static int 4081 ftrace_filter_open(struct inode *inode, struct file *file) 4082 { 4083 struct ftrace_ops *ops = inode->i_private; 4084 4085 /* Checks for tracefs lockdown */ 4086 return ftrace_regex_open(ops, 4087 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES, 4088 inode, file); 4089 } 4090 4091 static int 4092 ftrace_notrace_open(struct inode *inode, struct file *file) 4093 { 4094 struct ftrace_ops *ops = inode->i_private; 4095 4096 /* Checks for tracefs lockdown */ 4097 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, 4098 inode, file); 4099 } 4100 4101 /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */ 4102 struct ftrace_glob { 4103 char *search; 4104 unsigned len; 4105 int type; 4106 }; 4107 4108 /* 4109 * If symbols in an architecture don't correspond exactly to the user-visible 4110 * name of what they represent, it is possible to define this function to 4111 * perform the necessary adjustments. 
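 *
 * For example, on powerpc64 ELFv1 the dot-prefixed symbol marks the
 * function entry, so an arch hook along these lines (illustrative
 * sketch) lets users match the undotted name:
 *
 *	char *arch_ftrace_match_adjust(char *str, const char *search)
 *	{
 *		if (str[0] == '.' && search[0] != '.')
 *			str++;
 *		return str;
 *	}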
4112 */ 4113 char * __weak arch_ftrace_match_adjust(char *str, const char *search) 4114 { 4115 return str; 4116 } 4117 4118 static int ftrace_match(char *str, struct ftrace_glob *g) 4119 { 4120 int matched = 0; 4121 int slen; 4122 4123 str = arch_ftrace_match_adjust(str, g->search); 4124 4125 switch (g->type) { 4126 case MATCH_FULL: 4127 if (strcmp(str, g->search) == 0) 4128 matched = 1; 4129 break; 4130 case MATCH_FRONT_ONLY: 4131 if (strncmp(str, g->search, g->len) == 0) 4132 matched = 1; 4133 break; 4134 case MATCH_MIDDLE_ONLY: 4135 if (strstr(str, g->search)) 4136 matched = 1; 4137 break; 4138 case MATCH_END_ONLY: 4139 slen = strlen(str); 4140 if (slen >= g->len && 4141 memcmp(str + slen - g->len, g->search, g->len) == 0) 4142 matched = 1; 4143 break; 4144 case MATCH_GLOB: 4145 if (glob_match(g->search, str)) 4146 matched = 1; 4147 break; 4148 } 4149 4150 return matched; 4151 } 4152 4153 static int 4154 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter) 4155 { 4156 struct ftrace_func_entry *entry; 4157 int ret = 0; 4158 4159 entry = ftrace_lookup_ip(hash, rec->ip); 4160 if (clear_filter) { 4161 /* Do nothing if it doesn't exist */ 4162 if (!entry) 4163 return 0; 4164 4165 free_hash_entry(hash, entry); 4166 } else { 4167 /* Do nothing if it exists */ 4168 if (entry) 4169 return 0; 4170 4171 ret = add_hash_entry(hash, rec->ip); 4172 } 4173 return ret; 4174 } 4175 4176 static int 4177 add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g, 4178 int clear_filter) 4179 { 4180 long index = simple_strtoul(func_g->search, NULL, 0); 4181 struct ftrace_page *pg; 4182 struct dyn_ftrace *rec; 4183 4184 /* The index starts at 1 */ 4185 if (--index < 0) 4186 return 0; 4187 4188 do_for_each_ftrace_rec(pg, rec) { 4189 if (pg->index <= index) { 4190 index -= pg->index; 4191 /* this is a double loop, break goes to the next page */ 4192 break; 4193 } 4194 rec = &pg->records[index]; 4195 enter_record(hash, rec, clear_filter); 4196 return 1; 4197 } while_for_each_ftrace_rec(); 4198 return 0; 4199 } 4200 4201 #ifdef FTRACE_MCOUNT_MAX_OFFSET 4202 static int lookup_ip(unsigned long ip, char **modname, char *str) 4203 { 4204 unsigned long offset; 4205 4206 kallsyms_lookup(ip, NULL, &offset, modname, str); 4207 if (offset > FTRACE_MCOUNT_MAX_OFFSET) 4208 return -1; 4209 return 0; 4210 } 4211 #else 4212 static int lookup_ip(unsigned long ip, char **modname, char *str) 4213 { 4214 kallsyms_lookup(ip, NULL, NULL, modname, str); 4215 return 0; 4216 } 4217 #endif 4218 4219 static int 4220 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g, 4221 struct ftrace_glob *mod_g, int exclude_mod) 4222 { 4223 char str[KSYM_SYMBOL_LEN]; 4224 char *modname; 4225 4226 if (lookup_ip(rec->ip, &modname, str)) { 4227 /* This should only happen when a rec is disabled */ 4228 WARN_ON_ONCE(system_state == SYSTEM_RUNNING && 4229 !(rec->flags & FTRACE_FL_DISABLED)); 4230 return 0; 4231 } 4232 4233 if (mod_g) { 4234 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0; 4235 4236 /* blank module name to match all modules */ 4237 if (!mod_g->len) { 4238 /* blank module globbing: modname xor exclude_mod */ 4239 if (!exclude_mod != !modname) 4240 goto func_match; 4241 return 0; 4242 } 4243 4244 /* 4245 * exclude_mod is set to trace everything but the given 4246 * module. If it is set and the module matches, then 4247 * return 0. If it is not set, and the module doesn't match 4248 * also return 0. Otherwise, check the function to see if 4249 * that matches. 
4250 */
4251 if (!mod_matches == !exclude_mod)
4252 return 0;
4253 func_match:
4254 /* blank search means to match all funcs in the mod */
4255 if (!func_g->len)
4256 return 1;
4257 }
4258
4259 return ftrace_match(str, func_g);
4260 }
4261
4262 static int
4263 match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
4264 {
4265 struct ftrace_page *pg;
4266 struct dyn_ftrace *rec;
4267 struct ftrace_glob func_g = { .type = MATCH_FULL };
4268 struct ftrace_glob mod_g = { .type = MATCH_FULL };
4269 struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
4270 int exclude_mod = 0;
4271 int found = 0;
4272 int ret;
4273 int clear_filter = 0;
4274
4275 if (func) {
4276 func_g.type = filter_parse_regex(func, len, &func_g.search,
4277 &clear_filter);
4278 func_g.len = strlen(func_g.search);
4279 }
4280
4281 if (mod) {
4282 mod_g.type = filter_parse_regex(mod, strlen(mod),
4283 &mod_g.search, &exclude_mod);
4284 mod_g.len = strlen(mod_g.search);
4285 }
4286
4287 mutex_lock(&ftrace_lock);
4288
4289 if (unlikely(ftrace_disabled))
4290 goto out_unlock;
4291
4292 if (func_g.type == MATCH_INDEX) {
4293 found = add_rec_by_index(hash, &func_g, clear_filter);
4294 goto out_unlock;
4295 }
4296
4297 do_for_each_ftrace_rec(pg, rec) {
4298
4299 if (rec->flags & FTRACE_FL_DISABLED)
4300 continue;
4301
4302 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
4303 ret = enter_record(hash, rec, clear_filter);
4304 if (ret < 0) {
4305 found = ret;
4306 goto out_unlock;
4307 }
4308 found = 1;
4309 }
4310 cond_resched();
4311 } while_for_each_ftrace_rec();
4312 out_unlock:
4313 mutex_unlock(&ftrace_lock);
4314
4315 return found;
4316 }
4317
4318 static int
4319 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
4320 {
4321 return match_records(hash, buff, len, NULL);
4322 }
4323
4324 static void ftrace_ops_update_code(struct ftrace_ops *ops,
4325 struct ftrace_ops_hash *old_hash)
4326 {
4327 struct ftrace_ops *op;
4328
4329 if (!ftrace_enabled)
4330 return;
4331
4332 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4333 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4334 return;
4335 }
4336
4337 /*
4338 * If this is the shared global_ops filter, then we need to
4339 * check if there is another ops that shares it and is enabled.
4340 * If so, we still need to run the modify code.
4341 */ 4342 if (ops->func_hash != &global_ops.local_hash) 4343 return; 4344 4345 do_for_each_ftrace_op(op, ftrace_ops_list) { 4346 if (op->func_hash == &global_ops.local_hash && 4347 op->flags & FTRACE_OPS_FL_ENABLED) { 4348 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); 4349 /* Only need to do this once */ 4350 return; 4351 } 4352 } while_for_each_ftrace_op(op); 4353 } 4354 4355 static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, 4356 struct ftrace_hash **orig_hash, 4357 struct ftrace_hash *hash, 4358 int enable) 4359 { 4360 struct ftrace_ops_hash old_hash_ops; 4361 struct ftrace_hash *old_hash; 4362 int ret; 4363 4364 old_hash = *orig_hash; 4365 old_hash_ops.filter_hash = ops->func_hash->filter_hash; 4366 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; 4367 ret = ftrace_hash_move(ops, enable, orig_hash, hash); 4368 if (!ret) { 4369 ftrace_ops_update_code(ops, &old_hash_ops); 4370 free_ftrace_hash_rcu(old_hash); 4371 } 4372 return ret; 4373 } 4374 4375 static bool module_exists(const char *module) 4376 { 4377 /* All modules have the symbol __this_module */ 4378 static const char this_mod[] = "__this_module"; 4379 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2]; 4380 unsigned long val; 4381 int n; 4382 4383 n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod); 4384 4385 if (n > sizeof(modname) - 1) 4386 return false; 4387 4388 val = module_kallsyms_lookup_name(modname); 4389 return val != 0; 4390 } 4391 4392 static int cache_mod(struct trace_array *tr, 4393 const char *func, char *module, int enable) 4394 { 4395 struct ftrace_mod_load *ftrace_mod, *n; 4396 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace; 4397 int ret; 4398 4399 mutex_lock(&ftrace_lock); 4400 4401 /* We do not cache inverse filters */ 4402 if (func[0] == '!') { 4403 func++; 4404 ret = -EINVAL; 4405 4406 /* Look to remove this hash */ 4407 list_for_each_entry_safe(ftrace_mod, n, head, list) { 4408 if (strcmp(ftrace_mod->module, module) != 0) 4409 continue; 4410 4411 /* no func matches all */ 4412 if (strcmp(func, "*") == 0 || 4413 (ftrace_mod->func && 4414 strcmp(ftrace_mod->func, func) == 0)) { 4415 ret = 0; 4416 free_ftrace_mod(ftrace_mod); 4417 continue; 4418 } 4419 } 4420 goto out; 4421 } 4422 4423 ret = -EINVAL; 4424 /* We only care about modules that have not been loaded yet */ 4425 if (module_exists(module)) 4426 goto out; 4427 4428 /* Save this string off, and execute it when the module is loaded */ 4429 ret = ftrace_add_mod(tr, func, module, enable); 4430 out: 4431 mutex_unlock(&ftrace_lock); 4432 4433 return ret; 4434 } 4435 4436 static int 4437 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, 4438 int reset, int enable); 4439 4440 #ifdef CONFIG_MODULES 4441 static void process_mod_list(struct list_head *head, struct ftrace_ops *ops, 4442 char *mod, bool enable) 4443 { 4444 struct ftrace_mod_load *ftrace_mod, *n; 4445 struct ftrace_hash **orig_hash, *new_hash; 4446 LIST_HEAD(process_mods); 4447 char *func; 4448 4449 mutex_lock(&ops->func_hash->regex_lock); 4450 4451 if (enable) 4452 orig_hash = &ops->func_hash->filter_hash; 4453 else 4454 orig_hash = &ops->func_hash->notrace_hash; 4455 4456 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, 4457 *orig_hash); 4458 if (!new_hash) 4459 goto out; /* warn? 
*/
4460
4461 mutex_lock(&ftrace_lock);
4462
4463 list_for_each_entry_safe(ftrace_mod, n, head, list) {
4464
4465 if (strcmp(ftrace_mod->module, mod) != 0)
4466 continue;
4467
4468 if (ftrace_mod->func)
4469 func = kstrdup(ftrace_mod->func, GFP_KERNEL);
4470 else
4471 func = kstrdup("*", GFP_KERNEL);
4472
4473 if (!func) /* warn? */
4474 continue;
4475
4476 list_move(&ftrace_mod->list, &process_mods);
4477
4478 /* Use the newly allocated func, as it may be "*" */
4479 kfree(ftrace_mod->func);
4480 ftrace_mod->func = func;
4481 }
4482
4483 mutex_unlock(&ftrace_lock);
4484
4485 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
4486
4487 func = ftrace_mod->func;
4488
4489 /* Grabs ftrace_lock, which is why we have this extra step */
4490 match_records(new_hash, func, strlen(func), mod);
4491 free_ftrace_mod(ftrace_mod);
4492 }
4493
4494 if (enable && list_empty(head))
4495 new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4496
4497 mutex_lock(&ftrace_lock);
4498
4499 ftrace_hash_move_and_update_ops(ops, orig_hash,
4500 new_hash, enable);
4501 mutex_unlock(&ftrace_lock);
4502
4503 out:
4504 mutex_unlock(&ops->func_hash->regex_lock);
4505
4506 free_ftrace_hash(new_hash);
4507 }
4508
4509 static void process_cached_mods(const char *mod_name)
4510 {
4511 struct trace_array *tr;
4512 char *mod;
4513
4514 mod = kstrdup(mod_name, GFP_KERNEL);
4515 if (!mod)
4516 return;
4517
4518 mutex_lock(&trace_types_lock);
4519 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4520 if (!list_empty(&tr->mod_trace))
4521 process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4522 if (!list_empty(&tr->mod_notrace))
4523 process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4524 }
4525 mutex_unlock(&trace_types_lock);
4526
4527 kfree(mod);
4528 }
4529 #endif
4530
4531 /*
4532 * We register the module command as a template to show others how
4533 * to register a command as well.
4534 */
4535
4536 static int
4537 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
4538 char *func_orig, char *cmd, char *module, int enable)
4539 {
4540 char *func;
4541 int ret;
4542
4543 /* match_records() modifies func, and we need the original */
4544 func = kstrdup(func_orig, GFP_KERNEL);
4545 if (!func)
4546 return -ENOMEM;
4547
4548 /*
4549 * cmd == 'mod' because we only registered this func
4550 * for the 'mod' ftrace_func_command.
4551 * But if you register one func with multiple commands,
4552 * you can tell which command was used by the cmd
4553 * parameter.
4554 */
4555 ret = match_records(hash, func, strlen(func), module);
4556 kfree(func);
4557
4558 if (!ret)
4559 return cache_mod(tr, func_orig, module, enable);
4560 if (ret < 0)
4561 return ret;
4562 return 0;
4563 }
4564
4565 static struct ftrace_func_command ftrace_mod_cmd = {
4566 .name = "mod",
4567 .func = ftrace_mod_callback,
4568 };
4569
4570 static int __init ftrace_mod_cmd_init(void)
4571 {
4572 return register_ftrace_command(&ftrace_mod_cmd);
4573 }
4574 core_initcall(ftrace_mod_cmd_init);
4575
4576 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
4577 struct ftrace_ops *op, struct ftrace_regs *fregs)
4578 {
4579 struct ftrace_probe_ops *probe_ops;
4580 struct ftrace_func_probe *probe;
4581
4582 probe = container_of(op, struct ftrace_func_probe, ops);
4583 probe_ops = probe->probe_ops;
4584
4585 /*
4586 * Disable preemption for these calls to prevent an RCU grace
4587 * period. This syncs the hash iteration and freeing of items
4588 * on the hash. rcu_read_lock is too dangerous here.
4589 */
4590 preempt_disable_notrace();
4591 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
4592 preempt_enable_notrace();
4593 }
4594
4595 struct ftrace_func_map {
4596 struct ftrace_func_entry entry;
4597 void *data;
4598 };
4599
4600 struct ftrace_func_mapper {
4601 struct ftrace_hash hash;
4602 };
4603
4604 /**
4605 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
4606 *
4607 * Returns an ftrace_func_mapper descriptor that can be used to map ips to data.
4608 */
4609 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
4610 {
4611 struct ftrace_hash *hash;
4612
4613 /*
4614 * The mapper is simply an ftrace_hash, but since the entries
4615 * in the hash are not ftrace_func_entry type, we define it
4616 * as a separate structure.
4617 */
4618 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4619 return (struct ftrace_func_mapper *)hash;
4620 }
4621
4622 /**
4623 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4624 * @mapper: The mapper that has the ip maps
4625 * @ip: the instruction pointer to find the data for
4626 *
4627 * Returns the data mapped to @ip if found, otherwise NULL. The return
4628 * is actually the address of the mapper data pointer. The address is
4629 * returned for use cases where the data is no bigger than a long, and
4630 * the user can use the data pointer as its data instead of having to
4631 * allocate more memory for the reference.
4632 */
4633 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4634 unsigned long ip)
4635 {
4636 struct ftrace_func_entry *entry;
4637 struct ftrace_func_map *map;
4638
4639 entry = ftrace_lookup_ip(&mapper->hash, ip);
4640 if (!entry)
4641 return NULL;
4642
4643 map = (struct ftrace_func_map *)entry;
4644 return &map->data;
4645 }
4646
4647 /**
4648 * ftrace_func_mapper_add_ip - Map some data to an ip
4649 * @mapper: The mapper that has the ip maps
4650 * @ip: The instruction pointer address to map @data to
4651 * @data: The data to map to @ip
4652 *
4653 * Returns 0 on success, otherwise an error.
4654 */
4655 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4656 unsigned long ip, void *data)
4657 {
4658 struct ftrace_func_entry *entry;
4659 struct ftrace_func_map *map;
4660
4661 entry = ftrace_lookup_ip(&mapper->hash, ip);
4662 if (entry)
4663 return -EBUSY;
4664
4665 map = kmalloc(sizeof(*map), GFP_KERNEL);
4666 if (!map)
4667 return -ENOMEM;
4668
4669 map->entry.ip = ip;
4670 map->data = data;
4671
4672 __add_hash_entry(&mapper->hash, &map->entry);
4673
4674 return 0;
4675 }
4676
4677 /**
4678 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4679 * @mapper: The mapper that has the ip maps
4680 * @ip: The instruction pointer address to remove the data from
4681 *
4682 * Returns the data if it is found, otherwise NULL.
4683 * Note, if the data pointer is used as the data itself (see
4684 * ftrace_func_mapper_find_ip()), then the return value may be meaningless
4685 * if the data pointer was set to zero.
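*
* A minimal usage sketch of the mapper API (ip and my_data are
* placeholders, not names defined in this file):
*
*	struct ftrace_func_mapper *mapper;
*
*	mapper = allocate_ftrace_func_mapper();
*	if (!mapper)
*		return -ENOMEM;
*	ftrace_func_mapper_add_ip(mapper, ip, my_data);
*	...
*	my_data = ftrace_func_mapper_remove_ip(mapper, ip);
*	free_ftrace_func_mapper(mapper, NULL);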
4686 */ 4687 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper, 4688 unsigned long ip) 4689 { 4690 struct ftrace_func_entry *entry; 4691 struct ftrace_func_map *map; 4692 void *data; 4693 4694 entry = ftrace_lookup_ip(&mapper->hash, ip); 4695 if (!entry) 4696 return NULL; 4697 4698 map = (struct ftrace_func_map *)entry; 4699 data = map->data; 4700 4701 remove_hash_entry(&mapper->hash, entry); 4702 kfree(entry); 4703 4704 return data; 4705 } 4706 4707 /** 4708 * free_ftrace_func_mapper - free a mapping of ips and data 4709 * @mapper: The mapper that has the ip maps 4710 * @free_func: A function to be called on each data item. 4711 * 4712 * This is used to free the function mapper. The @free_func is optional 4713 * and can be used if the data needs to be freed as well. 4714 */ 4715 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, 4716 ftrace_mapper_func free_func) 4717 { 4718 struct ftrace_func_entry *entry; 4719 struct ftrace_func_map *map; 4720 struct hlist_head *hhd; 4721 int size, i; 4722 4723 if (!mapper) 4724 return; 4725 4726 if (free_func && mapper->hash.count) { 4727 size = 1 << mapper->hash.size_bits; 4728 for (i = 0; i < size; i++) { 4729 hhd = &mapper->hash.buckets[i]; 4730 hlist_for_each_entry(entry, hhd, hlist) { 4731 map = (struct ftrace_func_map *)entry; 4732 free_func(map); 4733 } 4734 } 4735 } 4736 free_ftrace_hash(&mapper->hash); 4737 } 4738 4739 static void release_probe(struct ftrace_func_probe *probe) 4740 { 4741 struct ftrace_probe_ops *probe_ops; 4742 4743 mutex_lock(&ftrace_lock); 4744 4745 WARN_ON(probe->ref <= 0); 4746 4747 /* Subtract the ref that was used to protect this instance */ 4748 probe->ref--; 4749 4750 if (!probe->ref) { 4751 probe_ops = probe->probe_ops; 4752 /* 4753 * Sending zero as ip tells probe_ops to free 4754 * the probe->data itself 4755 */ 4756 if (probe_ops->free) 4757 probe_ops->free(probe_ops, probe->tr, 0, probe->data); 4758 list_del(&probe->list); 4759 kfree(probe); 4760 } 4761 mutex_unlock(&ftrace_lock); 4762 } 4763 4764 static void acquire_probe_locked(struct ftrace_func_probe *probe) 4765 { 4766 /* 4767 * Add one ref to keep it from being freed when releasing the 4768 * ftrace_lock mutex. 4769 */ 4770 probe->ref++; 4771 } 4772 4773 int 4774 register_ftrace_function_probe(char *glob, struct trace_array *tr, 4775 struct ftrace_probe_ops *probe_ops, 4776 void *data) 4777 { 4778 struct ftrace_func_probe *probe = NULL, *iter; 4779 struct ftrace_func_entry *entry; 4780 struct ftrace_hash **orig_hash; 4781 struct ftrace_hash *old_hash; 4782 struct ftrace_hash *hash; 4783 int count = 0; 4784 int size; 4785 int ret; 4786 int i; 4787 4788 if (WARN_ON(!tr)) 4789 return -EINVAL; 4790 4791 /* We do not support '!' 
for function probes */ 4792 if (WARN_ON(glob[0] == '!')) 4793 return -EINVAL; 4794 4795 4796 mutex_lock(&ftrace_lock); 4797 /* Check if the probe_ops is already registered */ 4798 list_for_each_entry(iter, &tr->func_probes, list) { 4799 if (iter->probe_ops == probe_ops) { 4800 probe = iter; 4801 break; 4802 } 4803 } 4804 if (!probe) { 4805 probe = kzalloc(sizeof(*probe), GFP_KERNEL); 4806 if (!probe) { 4807 mutex_unlock(&ftrace_lock); 4808 return -ENOMEM; 4809 } 4810 probe->probe_ops = probe_ops; 4811 probe->ops.func = function_trace_probe_call; 4812 probe->tr = tr; 4813 ftrace_ops_init(&probe->ops); 4814 list_add(&probe->list, &tr->func_probes); 4815 } 4816 4817 acquire_probe_locked(probe); 4818 4819 mutex_unlock(&ftrace_lock); 4820 4821 /* 4822 * Note, there's a small window here that the func_hash->filter_hash 4823 * may be NULL or empty. Need to be careful when reading the loop. 4824 */ 4825 mutex_lock(&probe->ops.func_hash->regex_lock); 4826 4827 orig_hash = &probe->ops.func_hash->filter_hash; 4828 old_hash = *orig_hash; 4829 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4830 4831 if (!hash) { 4832 ret = -ENOMEM; 4833 goto out; 4834 } 4835 4836 ret = ftrace_match_records(hash, glob, strlen(glob)); 4837 4838 /* Nothing found? */ 4839 if (!ret) 4840 ret = -EINVAL; 4841 4842 if (ret < 0) 4843 goto out; 4844 4845 size = 1 << hash->size_bits; 4846 for (i = 0; i < size; i++) { 4847 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 4848 if (ftrace_lookup_ip(old_hash, entry->ip)) 4849 continue; 4850 /* 4851 * The caller might want to do something special 4852 * for each function we find. We call the callback 4853 * to give the caller an opportunity to do so. 4854 */ 4855 if (probe_ops->init) { 4856 ret = probe_ops->init(probe_ops, tr, 4857 entry->ip, data, 4858 &probe->data); 4859 if (ret < 0) { 4860 if (probe_ops->free && count) 4861 probe_ops->free(probe_ops, tr, 4862 0, probe->data); 4863 probe->data = NULL; 4864 goto out; 4865 } 4866 } 4867 count++; 4868 } 4869 } 4870 4871 mutex_lock(&ftrace_lock); 4872 4873 if (!count) { 4874 /* Nothing was added? 
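All matched functions were already in the old hash; there is nothing new for this probe to trace.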
*/ 4875 ret = -EINVAL; 4876 goto out_unlock; 4877 } 4878 4879 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, 4880 hash, 1); 4881 if (ret < 0) 4882 goto err_unlock; 4883 4884 /* One ref for each new function traced */ 4885 probe->ref += count; 4886 4887 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED)) 4888 ret = ftrace_startup(&probe->ops, 0); 4889 4890 out_unlock: 4891 mutex_unlock(&ftrace_lock); 4892 4893 if (!ret) 4894 ret = count; 4895 out: 4896 mutex_unlock(&probe->ops.func_hash->regex_lock); 4897 free_ftrace_hash(hash); 4898 4899 release_probe(probe); 4900 4901 return ret; 4902 4903 err_unlock: 4904 if (!probe_ops->free || !count) 4905 goto out_unlock; 4906 4907 /* Failed to do the move, need to call the free functions */ 4908 for (i = 0; i < size; i++) { 4909 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 4910 if (ftrace_lookup_ip(old_hash, entry->ip)) 4911 continue; 4912 probe_ops->free(probe_ops, tr, entry->ip, probe->data); 4913 } 4914 } 4915 goto out_unlock; 4916 } 4917 4918 int 4919 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, 4920 struct ftrace_probe_ops *probe_ops) 4921 { 4922 struct ftrace_func_probe *probe = NULL, *iter; 4923 struct ftrace_ops_hash old_hash_ops; 4924 struct ftrace_func_entry *entry; 4925 struct ftrace_glob func_g; 4926 struct ftrace_hash **orig_hash; 4927 struct ftrace_hash *old_hash; 4928 struct ftrace_hash *hash = NULL; 4929 struct hlist_node *tmp; 4930 struct hlist_head hhd; 4931 char str[KSYM_SYMBOL_LEN]; 4932 int count = 0; 4933 int i, ret = -ENODEV; 4934 int size; 4935 4936 if (!glob || !strlen(glob) || !strcmp(glob, "*")) 4937 func_g.search = NULL; 4938 else { 4939 int not; 4940 4941 func_g.type = filter_parse_regex(glob, strlen(glob), 4942 &func_g.search, ¬); 4943 func_g.len = strlen(func_g.search); 4944 4945 /* we do not support '!' for function probes */ 4946 if (WARN_ON(not)) 4947 return -EINVAL; 4948 } 4949 4950 mutex_lock(&ftrace_lock); 4951 /* Check if the probe_ops is already registered */ 4952 list_for_each_entry(iter, &tr->func_probes, list) { 4953 if (iter->probe_ops == probe_ops) { 4954 probe = iter; 4955 break; 4956 } 4957 } 4958 if (!probe) 4959 goto err_unlock_ftrace; 4960 4961 ret = -EINVAL; 4962 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED)) 4963 goto err_unlock_ftrace; 4964 4965 acquire_probe_locked(probe); 4966 4967 mutex_unlock(&ftrace_lock); 4968 4969 mutex_lock(&probe->ops.func_hash->regex_lock); 4970 4971 orig_hash = &probe->ops.func_hash->filter_hash; 4972 old_hash = *orig_hash; 4973 4974 if (ftrace_hash_empty(old_hash)) 4975 goto out_unlock; 4976 4977 old_hash_ops.filter_hash = old_hash; 4978 /* Probes only have filters */ 4979 old_hash_ops.notrace_hash = NULL; 4980 4981 ret = -ENOMEM; 4982 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4983 if (!hash) 4984 goto out_unlock; 4985 4986 INIT_HLIST_HEAD(&hhd); 4987 4988 size = 1 << hash->size_bits; 4989 for (i = 0; i < size; i++) { 4990 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { 4991 4992 if (func_g.search) { 4993 kallsyms_lookup(entry->ip, NULL, NULL, 4994 NULL, str); 4995 if (!ftrace_match(str, &func_g)) 4996 continue; 4997 } 4998 count++; 4999 remove_hash_entry(hash, entry); 5000 hlist_add_head(&entry->hlist, &hhd); 5001 } 5002 } 5003 5004 /* Nothing found? 
*/ 5005 if (!count) { 5006 ret = -EINVAL; 5007 goto out_unlock; 5008 } 5009 5010 mutex_lock(&ftrace_lock); 5011 5012 WARN_ON(probe->ref < count); 5013 5014 probe->ref -= count; 5015 5016 if (ftrace_hash_empty(hash)) 5017 ftrace_shutdown(&probe->ops, 0); 5018 5019 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, 5020 hash, 1); 5021 5022 /* still need to update the function call sites */ 5023 if (ftrace_enabled && !ftrace_hash_empty(hash)) 5024 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, 5025 &old_hash_ops); 5026 synchronize_rcu(); 5027 5028 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { 5029 hlist_del(&entry->hlist); 5030 if (probe_ops->free) 5031 probe_ops->free(probe_ops, tr, entry->ip, probe->data); 5032 kfree(entry); 5033 } 5034 mutex_unlock(&ftrace_lock); 5035 5036 out_unlock: 5037 mutex_unlock(&probe->ops.func_hash->regex_lock); 5038 free_ftrace_hash(hash); 5039 5040 release_probe(probe); 5041 5042 return ret; 5043 5044 err_unlock_ftrace: 5045 mutex_unlock(&ftrace_lock); 5046 return ret; 5047 } 5048 5049 void clear_ftrace_function_probes(struct trace_array *tr) 5050 { 5051 struct ftrace_func_probe *probe, *n; 5052 5053 list_for_each_entry_safe(probe, n, &tr->func_probes, list) 5054 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops); 5055 } 5056 5057 static LIST_HEAD(ftrace_commands); 5058 static DEFINE_MUTEX(ftrace_cmd_mutex); 5059 5060 /* 5061 * Currently we only register ftrace commands from __init, so mark this 5062 * __init too. 5063 */ 5064 __init int register_ftrace_command(struct ftrace_func_command *cmd) 5065 { 5066 struct ftrace_func_command *p; 5067 int ret = 0; 5068 5069 mutex_lock(&ftrace_cmd_mutex); 5070 list_for_each_entry(p, &ftrace_commands, list) { 5071 if (strcmp(cmd->name, p->name) == 0) { 5072 ret = -EBUSY; 5073 goto out_unlock; 5074 } 5075 } 5076 list_add(&cmd->list, &ftrace_commands); 5077 out_unlock: 5078 mutex_unlock(&ftrace_cmd_mutex); 5079 5080 return ret; 5081 } 5082 5083 /* 5084 * Currently we only unregister ftrace commands from __init, so mark 5085 * this __init too. 
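*
* An illustrative pairing with register_ftrace_command() (the command
* struct and callback names here are hypothetical):
*
*	static struct ftrace_func_command my_cmd = {
*		.name = "mycmd",
*		.func = my_cmd_callback,
*	};
*
*	register_ftrace_command(&my_cmd);
*	...
*	unregister_ftrace_command(&my_cmd);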
5086 */ 5087 __init int unregister_ftrace_command(struct ftrace_func_command *cmd) 5088 { 5089 struct ftrace_func_command *p, *n; 5090 int ret = -ENODEV; 5091 5092 mutex_lock(&ftrace_cmd_mutex); 5093 list_for_each_entry_safe(p, n, &ftrace_commands, list) { 5094 if (strcmp(cmd->name, p->name) == 0) { 5095 ret = 0; 5096 list_del_init(&p->list); 5097 goto out_unlock; 5098 } 5099 } 5100 out_unlock: 5101 mutex_unlock(&ftrace_cmd_mutex); 5102 5103 return ret; 5104 } 5105 5106 static int ftrace_process_regex(struct ftrace_iterator *iter, 5107 char *buff, int len, int enable) 5108 { 5109 struct ftrace_hash *hash = iter->hash; 5110 struct trace_array *tr = iter->ops->private; 5111 char *func, *command, *next = buff; 5112 struct ftrace_func_command *p; 5113 int ret = -EINVAL; 5114 5115 func = strsep(&next, ":"); 5116 5117 if (!next) { 5118 ret = ftrace_match_records(hash, func, len); 5119 if (!ret) 5120 ret = -EINVAL; 5121 if (ret < 0) 5122 return ret; 5123 return 0; 5124 } 5125 5126 /* command found */ 5127 5128 command = strsep(&next, ":"); 5129 5130 mutex_lock(&ftrace_cmd_mutex); 5131 list_for_each_entry(p, &ftrace_commands, list) { 5132 if (strcmp(p->name, command) == 0) { 5133 ret = p->func(tr, hash, func, command, next, enable); 5134 goto out_unlock; 5135 } 5136 } 5137 out_unlock: 5138 mutex_unlock(&ftrace_cmd_mutex); 5139 5140 return ret; 5141 } 5142 5143 static ssize_t 5144 ftrace_regex_write(struct file *file, const char __user *ubuf, 5145 size_t cnt, loff_t *ppos, int enable) 5146 { 5147 struct ftrace_iterator *iter; 5148 struct trace_parser *parser; 5149 ssize_t ret, read; 5150 5151 if (!cnt) 5152 return 0; 5153 5154 if (file->f_mode & FMODE_READ) { 5155 struct seq_file *m = file->private_data; 5156 iter = m->private; 5157 } else 5158 iter = file->private_data; 5159 5160 if (unlikely(ftrace_disabled)) 5161 return -ENODEV; 5162 5163 /* iter->hash is a local copy, so we don't need regex_lock */ 5164 5165 parser = &iter->parser; 5166 read = trace_get_user(parser, ubuf, cnt, ppos); 5167 5168 if (read >= 0 && trace_parser_loaded(parser) && 5169 !trace_parser_cont(parser)) { 5170 ret = ftrace_process_regex(iter, parser->buffer, 5171 parser->idx, enable); 5172 trace_parser_clear(parser); 5173 if (ret < 0) 5174 goto out; 5175 } 5176 5177 ret = read; 5178 out: 5179 return ret; 5180 } 5181 5182 ssize_t 5183 ftrace_filter_write(struct file *file, const char __user *ubuf, 5184 size_t cnt, loff_t *ppos) 5185 { 5186 return ftrace_regex_write(file, ubuf, cnt, ppos, 1); 5187 } 5188 5189 ssize_t 5190 ftrace_notrace_write(struct file *file, const char __user *ubuf, 5191 size_t cnt, loff_t *ppos) 5192 { 5193 return ftrace_regex_write(file, ubuf, cnt, ppos, 0); 5194 } 5195 5196 static int 5197 __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) 5198 { 5199 struct ftrace_func_entry *entry; 5200 5201 ip = ftrace_location(ip); 5202 if (!ip) 5203 return -EINVAL; 5204 5205 if (remove) { 5206 entry = ftrace_lookup_ip(hash, ip); 5207 if (!entry) 5208 return -ENOENT; 5209 free_hash_entry(hash, entry); 5210 return 0; 5211 } 5212 5213 return add_hash_entry(hash, ip); 5214 } 5215 5216 static int 5217 ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips, 5218 unsigned int cnt, int remove) 5219 { 5220 unsigned int i; 5221 int err; 5222 5223 for (i = 0; i < cnt; i++) { 5224 err = __ftrace_match_addr(hash, ips[i], remove); 5225 if (err) { 5226 /* 5227 * This expects the @hash is a temporary hash and if this 5228 * fails the caller must free the @hash. 
5229 */ 5230 return err; 5231 } 5232 } 5233 return 0; 5234 } 5235 5236 static int 5237 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, 5238 unsigned long *ips, unsigned int cnt, 5239 int remove, int reset, int enable) 5240 { 5241 struct ftrace_hash **orig_hash; 5242 struct ftrace_hash *hash; 5243 int ret; 5244 5245 if (unlikely(ftrace_disabled)) 5246 return -ENODEV; 5247 5248 mutex_lock(&ops->func_hash->regex_lock); 5249 5250 if (enable) 5251 orig_hash = &ops->func_hash->filter_hash; 5252 else 5253 orig_hash = &ops->func_hash->notrace_hash; 5254 5255 if (reset) 5256 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 5257 else 5258 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 5259 5260 if (!hash) { 5261 ret = -ENOMEM; 5262 goto out_regex_unlock; 5263 } 5264 5265 if (buf && !ftrace_match_records(hash, buf, len)) { 5266 ret = -EINVAL; 5267 goto out_regex_unlock; 5268 } 5269 if (ips) { 5270 ret = ftrace_match_addr(hash, ips, cnt, remove); 5271 if (ret < 0) 5272 goto out_regex_unlock; 5273 } 5274 5275 mutex_lock(&ftrace_lock); 5276 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); 5277 mutex_unlock(&ftrace_lock); 5278 5279 out_regex_unlock: 5280 mutex_unlock(&ops->func_hash->regex_lock); 5281 5282 free_ftrace_hash(hash); 5283 return ret; 5284 } 5285 5286 static int 5287 ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt, 5288 int remove, int reset, int enable) 5289 { 5290 return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable); 5291 } 5292 5293 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 5294 5295 struct ftrace_direct_func { 5296 struct list_head next; 5297 unsigned long addr; 5298 int count; 5299 }; 5300 5301 static LIST_HEAD(ftrace_direct_funcs); 5302 5303 /** 5304 * ftrace_find_direct_func - test an address if it is a registered direct caller 5305 * @addr: The address of a registered direct caller 5306 * 5307 * This searches to see if a ftrace direct caller has been registered 5308 * at a specific address, and if so, it returns a descriptor for it. 5309 * 5310 * This can be used by architecture code to see if an address is 5311 * a direct caller (trampoline) attached to a fentry/mcount location. 5312 * This is useful for the function_graph tracer, as it may need to 5313 * do adjustments if it traced a location that also has a direct 5314 * trampoline attached to it. 
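*
* A sketch of how a caller might use it (illustrative only; the handler
* named here is hypothetical):
*
*	struct ftrace_direct_func *direct;
*
*	direct = ftrace_find_direct_func(addr);
*	if (direct)
*		handle_direct_trampoline(direct->addr);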
5315 */ 5316 struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr) 5317 { 5318 struct ftrace_direct_func *entry; 5319 bool found = false; 5320 5321 /* May be called by fgraph trampoline (protected by rcu tasks) */ 5322 list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) { 5323 if (entry->addr == addr) { 5324 found = true; 5325 break; 5326 } 5327 } 5328 if (found) 5329 return entry; 5330 5331 return NULL; 5332 } 5333 5334 static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr) 5335 { 5336 struct ftrace_direct_func *direct; 5337 5338 direct = kmalloc(sizeof(*direct), GFP_KERNEL); 5339 if (!direct) 5340 return NULL; 5341 direct->addr = addr; 5342 direct->count = 0; 5343 list_add_rcu(&direct->next, &ftrace_direct_funcs); 5344 ftrace_direct_func_count++; 5345 return direct; 5346 } 5347 5348 static int register_ftrace_function_nolock(struct ftrace_ops *ops); 5349 5350 /** 5351 * register_ftrace_direct - Call a custom trampoline directly 5352 * @ip: The address of the nop at the beginning of a function 5353 * @addr: The address of the trampoline to call at @ip 5354 * 5355 * This is used to connect a direct call from the nop location (@ip) 5356 * at the start of ftrace traced functions. The location that it calls 5357 * (@addr) must be able to handle a direct call, and save the parameters 5358 * of the function being traced, and restore them (or inject new ones 5359 * if needed), before returning. 5360 * 5361 * Returns: 5362 * 0 on success 5363 * -EBUSY - Another direct function is already attached (there can be only one) 5364 * -ENODEV - @ip does not point to a ftrace nop location (or not supported) 5365 * -ENOMEM - There was an allocation failure. 5366 */ 5367 int register_ftrace_direct(unsigned long ip, unsigned long addr) 5368 { 5369 struct ftrace_direct_func *direct; 5370 struct ftrace_func_entry *entry; 5371 struct ftrace_hash *free_hash = NULL; 5372 struct dyn_ftrace *rec; 5373 int ret = -ENODEV; 5374 5375 mutex_lock(&direct_mutex); 5376 5377 ip = ftrace_location(ip); 5378 if (!ip) 5379 goto out_unlock; 5380 5381 /* See if there's a direct function at @ip already */ 5382 ret = -EBUSY; 5383 if (ftrace_find_rec_direct(ip)) 5384 goto out_unlock; 5385 5386 ret = -ENODEV; 5387 rec = lookup_rec(ip, ip); 5388 if (!rec) 5389 goto out_unlock; 5390 5391 /* 5392 * Check if the rec says it has a direct call but we didn't 5393 * find one earlier? 5394 */ 5395 if (WARN_ON(rec->flags & FTRACE_FL_DIRECT)) 5396 goto out_unlock; 5397 5398 /* Make sure the ip points to the exact record */ 5399 if (ip != rec->ip) { 5400 ip = rec->ip; 5401 /* Need to check this ip for a direct. 
*/ 5402 if (ftrace_find_rec_direct(ip)) 5403 goto out_unlock; 5404 } 5405 5406 ret = -ENOMEM; 5407 direct = ftrace_find_direct_func(addr); 5408 if (!direct) { 5409 direct = ftrace_alloc_direct_func(addr); 5410 if (!direct) 5411 goto out_unlock; 5412 } 5413 5414 entry = ftrace_add_rec_direct(ip, addr, &free_hash); 5415 if (!entry) 5416 goto out_unlock; 5417 5418 ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0); 5419 5420 if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) { 5421 ret = register_ftrace_function_nolock(&direct_ops); 5422 if (ret) 5423 ftrace_set_filter_ip(&direct_ops, ip, 1, 0); 5424 } 5425 5426 if (ret) { 5427 remove_hash_entry(direct_functions, entry); 5428 kfree(entry); 5429 if (!direct->count) { 5430 list_del_rcu(&direct->next); 5431 synchronize_rcu_tasks(); 5432 kfree(direct); 5433 if (free_hash) 5434 free_ftrace_hash(free_hash); 5435 free_hash = NULL; 5436 ftrace_direct_func_count--; 5437 } 5438 } else { 5439 direct->count++; 5440 } 5441 out_unlock: 5442 mutex_unlock(&direct_mutex); 5443 5444 if (free_hash) { 5445 synchronize_rcu_tasks(); 5446 free_ftrace_hash(free_hash); 5447 } 5448 5449 return ret; 5450 } 5451 EXPORT_SYMBOL_GPL(register_ftrace_direct); 5452 5453 static struct ftrace_func_entry *find_direct_entry(unsigned long *ip, 5454 struct dyn_ftrace **recp) 5455 { 5456 struct ftrace_func_entry *entry; 5457 struct dyn_ftrace *rec; 5458 5459 rec = lookup_rec(*ip, *ip); 5460 if (!rec) 5461 return NULL; 5462 5463 entry = __ftrace_lookup_ip(direct_functions, rec->ip); 5464 if (!entry) { 5465 WARN_ON(rec->flags & FTRACE_FL_DIRECT); 5466 return NULL; 5467 } 5468 5469 WARN_ON(!(rec->flags & FTRACE_FL_DIRECT)); 5470 5471 /* Passed in ip just needs to be on the call site */ 5472 *ip = rec->ip; 5473 5474 if (recp) 5475 *recp = rec; 5476 5477 return entry; 5478 } 5479 5480 int unregister_ftrace_direct(unsigned long ip, unsigned long addr) 5481 { 5482 struct ftrace_direct_func *direct; 5483 struct ftrace_func_entry *entry; 5484 struct ftrace_hash *hash; 5485 int ret = -ENODEV; 5486 5487 mutex_lock(&direct_mutex); 5488 5489 ip = ftrace_location(ip); 5490 if (!ip) 5491 goto out_unlock; 5492 5493 entry = find_direct_entry(&ip, NULL); 5494 if (!entry) 5495 goto out_unlock; 5496 5497 hash = direct_ops.func_hash->filter_hash; 5498 if (hash->count == 1) 5499 unregister_ftrace_function(&direct_ops); 5500 5501 ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0); 5502 5503 WARN_ON(ret); 5504 5505 remove_hash_entry(direct_functions, entry); 5506 5507 direct = ftrace_find_direct_func(addr); 5508 if (!WARN_ON(!direct)) { 5509 /* This is the good path (see the ! 
before WARN) */
5510 direct->count--;
5511 WARN_ON(direct->count < 0);
5512 if (!direct->count) {
5513 list_del_rcu(&direct->next);
5514 synchronize_rcu_tasks();
5515 kfree(direct);
5516 kfree(entry);
5517 ftrace_direct_func_count--;
5518 }
5519 }
5520 out_unlock:
5521 mutex_unlock(&direct_mutex);
5522
5523 return ret;
5524 }
5525 EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
5526
5527 static struct ftrace_ops stub_ops = {
5528 .func = ftrace_stub,
5529 };
5530
5531 /**
5532 * ftrace_modify_direct_caller - modify ftrace nop directly
5533 * @entry: The ftrace hash entry of the direct helper for @rec
5534 * @rec: The record representing the function site to patch
5535 * @old_addr: The location that the site at @rec->ip currently calls
5536 * @new_addr: The location that the site at @rec->ip should call
5537 *
5538 * An architecture may override this function to optimize the
5539 * changing of the direct callback on an ftrace nop location.
5540 * This is called with the ftrace_lock mutex held, and no other
5541 * ftrace callbacks are on the associated record (@rec). Thus,
5542 * it is safe to modify the ftrace record, where it should be
5543 * currently calling @old_addr directly, to call @new_addr.
5544 *
5545 * This is called with direct_mutex locked.
5546 *
5547 * Safety checks should be made to make sure that the code at
5548 * @rec->ip is currently calling @old_addr, and this must
5549 * also update entry->direct to @new_addr.
5550 */
5551 int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
5552 struct dyn_ftrace *rec,
5553 unsigned long old_addr,
5554 unsigned long new_addr)
5555 {
5556 unsigned long ip = rec->ip;
5557 int ret;
5558
5559 lockdep_assert_held(&direct_mutex);
5560
5561 /*
5562 * The ftrace_lock was used to determine if the record
5563 * had more than one registered user attached to it. If it did,
5564 * we needed to prevent that from changing in order to do the quick
5565 * switch. But if it did not (only a direct caller was attached),
5566 * then this function is called. This function can deal
5567 * with other callers attached to the rec that we care about, and
5568 * since this function uses standard ftrace calls that take
5569 * the ftrace_lock mutex, we need to release it.
5570 */
5571 mutex_unlock(&ftrace_lock);
5572
5573 /*
5574 * By setting a stub function at the same address, we force
5575 * the code to call the iterator and the direct_ops helper.
5576 * This means that @ip does not call the direct call, and
5577 * we can simply modify it.
5578 */
5579 ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
5580 if (ret)
5581 goto out_lock;
5582
5583 ret = register_ftrace_function_nolock(&stub_ops);
5584 if (ret) {
5585 ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5586 goto out_lock;
5587 }
5588
5589 entry->direct = new_addr;
5590
5591 /*
5592 * By removing the stub, we put back the direct call, calling
5593 * the @new_addr.
5594 */
5595 unregister_ftrace_function(&stub_ops);
5596 ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5597
5598 out_lock:
5599 mutex_lock(&ftrace_lock);
5600
5601 return ret;
5602 }
5603
5604 /**
5605 * modify_ftrace_direct - Modify an existing direct call to call something else
5606 * @ip: The instruction pointer to modify
5607 * @old_addr: The address that the current @ip calls directly
5608 * @new_addr: The address that the @ip should call
5609 *
5610 * This modifies an ftrace direct caller at an instruction pointer without
5611 * having to disable it first. The direct call will switch over to the
5612 * @new_addr without missing anything.
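*
* An illustrative call sequence (my_tramp_1 and my_tramp_2 are placeholder
* trampolines, not defined in this file):
*
*	register_ftrace_direct(ip, (unsigned long)my_tramp_1);
*	...
*	modify_ftrace_direct(ip, (unsigned long)my_tramp_1,
*			     (unsigned long)my_tramp_2);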
5613 *
5614 * Returns: zero on success. Non zero on error, which includes:
5615 * -ENODEV : the @ip given has no direct caller attached
5616 * -EINVAL : the @old_addr does not match the current direct caller
5617 */
5618 int modify_ftrace_direct(unsigned long ip,
5619 unsigned long old_addr, unsigned long new_addr)
5620 {
5621 struct ftrace_direct_func *direct, *new_direct = NULL;
5622 struct ftrace_func_entry *entry;
5623 struct dyn_ftrace *rec;
5624 int ret = -ENODEV;
5625
5626 mutex_lock(&direct_mutex);
5627
5628 mutex_lock(&ftrace_lock);
5629
5630 ip = ftrace_location(ip);
5631 if (!ip)
5632 goto out_unlock;
5633
5634 entry = find_direct_entry(&ip, &rec);
5635 if (!entry)
5636 goto out_unlock;
5637
5638 ret = -EINVAL;
5639 if (entry->direct != old_addr)
5640 goto out_unlock;
5641
5642 direct = ftrace_find_direct_func(old_addr);
5643 if (WARN_ON(!direct))
5644 goto out_unlock;
5645 if (direct->count > 1) {
5646 ret = -ENOMEM;
5647 new_direct = ftrace_alloc_direct_func(new_addr);
5648 if (!new_direct)
5649 goto out_unlock;
5650 direct->count--;
5651 new_direct->count++;
5652 } else {
5653 direct->addr = new_addr;
5654 }
5655
5656 /*
5657 * If there's no other ftrace callback on the rec->ip location,
5658 * then it can be changed directly by the architecture.
5659 * If there is another caller, then we just need to change the
5660 * direct caller helper to point to @new_addr.
5661 */
5662 if (ftrace_rec_count(rec) == 1) {
5663 ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
5664 } else {
5665 entry->direct = new_addr;
5666 ret = 0;
5667 }
5668
5669 if (unlikely(ret && new_direct)) {
5670 direct->count++;
5671 list_del_rcu(&new_direct->next);
5672 synchronize_rcu_tasks();
5673 kfree(new_direct);
5674 ftrace_direct_func_count--;
5675 }
5676
5677 out_unlock:
5678 mutex_unlock(&ftrace_lock);
5679 mutex_unlock(&direct_mutex);
5680 return ret;
5681 }
5682 EXPORT_SYMBOL_GPL(modify_ftrace_direct);
5683
5684 #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
5685
5686 static int check_direct_multi(struct ftrace_ops *ops)
5687 {
5688 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5689 return -EINVAL;
5690 if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
5691 return -EINVAL;
5692 return 0;
5693 }
5694
5695 static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
5696 {
5697 struct ftrace_func_entry *entry, *del;
5698 int size, i;
5699
5700 size = 1 << hash->size_bits;
5701 for (i = 0; i < size; i++) {
5702 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5703 del = __ftrace_lookup_ip(direct_functions, entry->ip);
5704 if (del && del->direct == addr) {
5705 remove_hash_entry(direct_functions, del);
5706 kfree(del);
5707 }
5708 }
5709 }
5710 }
5711
5712 /**
5713 * register_ftrace_direct_multi - Call a custom trampoline directly
5714 * for multiple functions registered in @ops
5715 * @ops: The address of the struct ftrace_ops object
5716 * @addr: The address of the trampoline to call at @ops functions
5717 *
5718 * This is used to connect a direct call to @addr from the nop locations
5719 * of the functions registered in @ops (via the ftrace_set_filter_ip
5720 * function).
5721 *
5722 * The location that it calls (@addr) must be able to handle a direct call,
5723 * and save the parameters of the function being traced, and restore them
5724 * (or inject new ones if needed), before returning.
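*
* A sketch of typical usage (the ops, ips and trampoline shown are
* placeholders the caller provides):
*
*	ftrace_set_filter_ip(&my_ops, ip1, 0, 0);
*	ftrace_set_filter_ip(&my_ops, ip2, 0, 0);
*	err = register_ftrace_direct_multi(&my_ops, (unsigned long)my_tramp);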
5725 *
5726 * Returns:
5727 * 0 on success
5728 * -EINVAL - The @ops object was already registered with this call, or
5729 * there are no functions in the @ops object.
5730 * -EBUSY - Another direct function is already attached (there can be only one)
5731 * -ENODEV - @ip does not point to a ftrace nop location (or not supported)
5732 * -ENOMEM - There was an allocation failure.
5733 */
5734 int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5735 {
5736 struct ftrace_hash *hash, *free_hash = NULL;
5737 struct ftrace_func_entry *entry, *new;
5738 int err = -EBUSY, size, i;
5739
5740 if (ops->func || ops->trampoline)
5741 return -EINVAL;
5742 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5743 return -EINVAL;
5744 if (ops->flags & FTRACE_OPS_FL_ENABLED)
5745 return -EINVAL;
5746
5747 hash = ops->func_hash->filter_hash;
5748 if (ftrace_hash_empty(hash))
5749 return -EINVAL;
5750
5751 mutex_lock(&direct_mutex);
5752
5753 /* Make sure requested entries are not already registered... */
5754 size = 1 << hash->size_bits;
5755 for (i = 0; i < size; i++) {
5756 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5757 if (ftrace_find_rec_direct(entry->ip))
5758 goto out_unlock;
5759 }
5760 }
5761
5762 /* ...and insert them into the direct_functions hash. */
5763 err = -ENOMEM;
5764 for (i = 0; i < size; i++) {
5765 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5766 new = ftrace_add_rec_direct(entry->ip, addr, &free_hash);
5767 if (!new)
5768 goto out_remove;
5769 entry->direct = addr;
5770 }
5771 }
5772
5773 ops->func = call_direct_funcs;
5774 ops->flags = MULTI_FLAGS;
5775 ops->trampoline = FTRACE_REGS_ADDR;
5776
5777 err = register_ftrace_function_nolock(ops);
5778
5779 out_remove:
5780 if (err)
5781 remove_direct_functions_hash(hash, addr);
5782
5783 out_unlock:
5784 mutex_unlock(&direct_mutex);
5785
5786 if (free_hash) {
5787 synchronize_rcu_tasks();
5788 free_ftrace_hash(free_hash);
5789 }
5790 return err;
5791 }
5792 EXPORT_SYMBOL_GPL(register_ftrace_direct_multi);
5793
5794 /**
5795 * unregister_ftrace_direct_multi - Remove calls to custom trampoline
5796 * previously registered by register_ftrace_direct_multi for @ops object.
5797 * @ops: The address of the struct ftrace_ops object
5798 *
5799 * This is used to remove direct calls to @addr from the nop locations
5800 * of the functions registered in @ops (via the ftrace_set_filter_ip
5801 * function).
5802 *
5803 * Returns:
5804 * 0 on success
5805 * -EINVAL - The @ops object was not properly registered.
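*
* Illustrative teardown matching the register_ftrace_direct_multi()
* sketch above (placeholder names):
*
*	err = unregister_ftrace_direct_multi(&my_ops, (unsigned long)my_tramp);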
5806 */
5807 int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5808 {
5809 struct ftrace_hash *hash = ops->func_hash->filter_hash;
5810 int err;
5811
5812 if (check_direct_multi(ops))
5813 return -EINVAL;
5814 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5815 return -EINVAL;
5816
5817 mutex_lock(&direct_mutex);
5818 err = unregister_ftrace_function(ops);
5819 remove_direct_functions_hash(hash, addr);
5820 mutex_unlock(&direct_mutex);
5821
5822 /* clean up for a possible subsequent register call */
5823 ops->func = NULL;
5824 ops->trampoline = 0;
5825 return err;
5826 }
5827 EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi);
5828
5829 static int
5830 __modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5831 {
5832 struct ftrace_hash *hash;
5833 struct ftrace_func_entry *entry, *iter;
5834 static struct ftrace_ops tmp_ops = {
5835 .func = ftrace_stub,
5836 .flags = FTRACE_OPS_FL_STUB,
5837 };
5838 int i, size;
5839 int err;
5840
5841 lockdep_assert_held_once(&direct_mutex);
5842
5843 /* Enable the tmp_ops to have the same functions as the direct ops */
5844 ftrace_ops_init(&tmp_ops);
5845 tmp_ops.func_hash = ops->func_hash;
5846
5847 err = register_ftrace_function_nolock(&tmp_ops);
5848 if (err)
5849 return err;
5850
5851 /*
5852 * Now the ftrace_ops_list_func() is called to do the direct callers.
5853 * We can safely change the direct functions attached to each entry.
5854 */
5855 mutex_lock(&ftrace_lock);
5856
5857 hash = ops->func_hash->filter_hash;
5858 size = 1 << hash->size_bits;
5859 for (i = 0; i < size; i++) {
5860 hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
5861 entry = __ftrace_lookup_ip(direct_functions, iter->ip);
5862 if (!entry)
5863 continue;
5864 entry->direct = addr;
5865 }
5866 }
5867
5868 mutex_unlock(&ftrace_lock);
5869
5870 /* Removing the tmp_ops will add the updated direct callers to the functions */
5871 unregister_ftrace_function(&tmp_ops);
5872
5873 return err;
5874 }
5875
5876 /**
5877 * modify_ftrace_direct_multi_nolock - Modify an existing direct 'multi' call
5878 * to call something else
5879 * @ops: The address of the struct ftrace_ops object
5880 * @addr: The address of the new trampoline to call at @ops functions
5881 *
5882 * This is used to unregister the currently registered direct caller and
5883 * register a new one (@addr) for the functions registered in @ops.
5884 *
5885 * Note there's a window between the ftrace_shutdown and ftrace_startup calls
5886 * during which no callbacks will be called.
5887 *
5888 * Caller should already have direct_mutex locked, so we don't lock
5889 * direct_mutex here.
5890 *
5891 * Returns: zero on success. Non zero on error, which includes:
5892 * -EINVAL - The @ops object was not properly registered.
5893 */
5894 int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr)
5895 {
5896 if (check_direct_multi(ops))
5897 return -EINVAL;
5898 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5899 return -EINVAL;
5900
5901 return __modify_ftrace_direct_multi(ops, addr);
5902 }
5903 EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi_nolock);
5904
5905 /**
5906 * modify_ftrace_direct_multi - Modify an existing direct 'multi' call
5907 * to call something else
5908 * @ops: The address of the struct ftrace_ops object
5909 * @addr: The address of the new trampoline to call at @ops functions
5910 *
5911 * This is used to unregister the currently registered direct caller and
5912 * register a new one (@addr) for the functions registered in @ops.
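*
* Illustrative, continuing the placeholder sketch above:
*
*	err = modify_ftrace_direct_multi(&my_ops, (unsigned long)my_new_tramp);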
5913 *
5914 * Note there's a window between the ftrace_shutdown and ftrace_startup calls
5915 * during which no callbacks will be called.
5916 *
5917 * Returns: zero on success. Non zero on error, which includes:
5918 * -EINVAL - The @ops object was not properly registered.
5919 */
5920 int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5921 {
5922 int err;
5923
5924 if (check_direct_multi(ops))
5925 return -EINVAL;
5926 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5927 return -EINVAL;
5928
5929 mutex_lock(&direct_mutex);
5930 err = __modify_ftrace_direct_multi(ops, addr);
5931 mutex_unlock(&direct_mutex);
5932 return err;
5933 }
5934 EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi);
5935 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
5936
5937 /**
5938 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
5939 * @ops - the ops to set the filter with
5940 * @ip - the address to add to or remove from the filter.
5941 * @remove - non zero to remove the ip from the filter
5942 * @reset - non zero to reset all filters before applying this filter.
5943 *
5944 * Filters denote which functions should be enabled when tracing is enabled.
5945 * If @ip is NULL, it fails to update the filter.
5946 *
5947 * This can allocate memory which must be freed before @ops can be freed,
5948 * either by removing each filtered addr or by using
5949 * ftrace_free_filter(@ops).
5950 */
5951 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
5952 int remove, int reset)
5953 {
5954 ftrace_ops_init(ops);
5955 return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
5956 }
5957 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
5958
5959 /**
5960 * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
5961 * @ops - the ops to set the filter with
5962 * @ips - the array of addresses to add to or remove from the filter.
5963 * @cnt - the number of addresses in @ips
5964 * @remove - non zero to remove ips from the filter
5965 * @reset - non zero to reset all filters before applying this filter.
5966 *
5967 * Filters denote which functions should be enabled when tracing is enabled.
5968 * If the @ips array or any ip within it is NULL, it fails to update the filter.
5969 *
5970 * This can allocate memory which must be freed before @ops can be freed,
5971 * either by removing each filtered addr or by using
5972 * ftrace_free_filter(@ops).
5973 */
5974 int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
5975 unsigned int cnt, int remove, int reset)
5976 {
5977 ftrace_ops_init(ops);
5978 return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
5979 }
5980 EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
5981
5982 /**
5983 * ftrace_ops_set_global_filter - setup ops to use global filters
5984 * @ops - the ops which will use the global filters
5985 *
5986 * ftrace users who need global function trace filtering should call this.
5987 * It can set the global filter only if ops were not initialized before.
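*
* A minimal sketch (my_ops and its callback are placeholders the caller
* defines; the ops must not have been initialized yet):
*
*	struct ftrace_ops my_ops = {
*		.func = my_callback_func,
*	};
*
*	ftrace_ops_set_global_filter(&my_ops);
*	register_ftrace_function(&my_ops);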
5988 */ 5989 void ftrace_ops_set_global_filter(struct ftrace_ops *ops) 5990 { 5991 if (ops->flags & FTRACE_OPS_FL_INITIALIZED) 5992 return; 5993 5994 ftrace_ops_init(ops); 5995 ops->func_hash = &global_ops.local_hash; 5996 } 5997 EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter); 5998 5999 static int 6000 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, 6001 int reset, int enable) 6002 { 6003 return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable); 6004 } 6005 6006 /** 6007 * ftrace_set_filter - set a function to filter on in ftrace 6008 * @ops - the ops to set the filter with 6009 * @buf - the string that holds the function filter text. 6010 * @len - the length of the string. 6011 * @reset - non zero to reset all filters before applying this filter. 6012 * 6013 * Filters denote which functions should be enabled when tracing is enabled. 6014 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 6015 * 6016 * This can allocate memory which must be freed before @ops can be freed, 6017 * either by removing each filtered addr or by using 6018 * ftrace_free_filter(@ops). 6019 */ 6020 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, 6021 int len, int reset) 6022 { 6023 ftrace_ops_init(ops); 6024 return ftrace_set_regex(ops, buf, len, reset, 1); 6025 } 6026 EXPORT_SYMBOL_GPL(ftrace_set_filter); 6027 6028 /** 6029 * ftrace_set_notrace - set a function to not trace in ftrace 6030 * @ops - the ops to set the notrace filter with 6031 * @buf - the string that holds the function notrace text. 6032 * @len - the length of the string. 6033 * @reset - non zero to reset all filters before applying this filter. 6034 * 6035 * Notrace Filters denote which functions should not be enabled when tracing 6036 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 6037 * for tracing. 6038 * 6039 * This can allocate memory which must be freed before @ops can be freed, 6040 * either by removing each filtered addr or by using 6041 * ftrace_free_filter(@ops). 6042 */ 6043 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, 6044 int len, int reset) 6045 { 6046 ftrace_ops_init(ops); 6047 return ftrace_set_regex(ops, buf, len, reset, 0); 6048 } 6049 EXPORT_SYMBOL_GPL(ftrace_set_notrace); 6050 /** 6051 * ftrace_set_global_filter - set a function to filter on with global tracers 6052 * @buf - the string that holds the function filter text. 6053 * @len - the length of the string. 6054 * @reset - non zero to reset all filters before applying this filter. 6055 * 6056 * Filters denote which functions should be enabled when tracing is enabled. 6057 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 6058 */ 6059 void ftrace_set_global_filter(unsigned char *buf, int len, int reset) 6060 { 6061 ftrace_set_regex(&global_ops, buf, len, reset, 1); 6062 } 6063 EXPORT_SYMBOL_GPL(ftrace_set_global_filter); 6064 6065 /** 6066 * ftrace_set_global_notrace - set a function to not trace with global tracers 6067 * @buf - the string that holds the function notrace text. 6068 * @len - the length of the string. 6069 * @reset - non zero to reset all filters before applying this filter. 6070 * 6071 * Notrace Filters denote which functions should not be enabled when tracing 6072 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 6073 * for tracing. 
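*
* Illustrative only (the pattern shown is an example):
*
*	unsigned char buf[] = "*_lock";
*
*	ftrace_set_global_notrace(buf, sizeof(buf) - 1, 1);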
6074 */ 6075 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) 6076 { 6077 ftrace_set_regex(&global_ops, buf, len, reset, 0); 6078 } 6079 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); 6080 6081 /* 6082 * command line interface to allow users to set filters on boot up. 6083 */ 6084 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE 6085 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 6086 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; 6087 6088 /* Used by function selftest to not test if filter is set */ 6089 bool ftrace_filter_param __initdata; 6090 6091 static int __init set_ftrace_notrace(char *str) 6092 { 6093 ftrace_filter_param = true; 6094 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); 6095 return 1; 6096 } 6097 __setup("ftrace_notrace=", set_ftrace_notrace); 6098 6099 static int __init set_ftrace_filter(char *str) 6100 { 6101 ftrace_filter_param = true; 6102 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); 6103 return 1; 6104 } 6105 __setup("ftrace_filter=", set_ftrace_filter); 6106 6107 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 6108 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; 6109 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 6110 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); 6111 6112 static int __init set_graph_function(char *str) 6113 { 6114 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); 6115 return 1; 6116 } 6117 __setup("ftrace_graph_filter=", set_graph_function); 6118 6119 static int __init set_graph_notrace_function(char *str) 6120 { 6121 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE); 6122 return 1; 6123 } 6124 __setup("ftrace_graph_notrace=", set_graph_notrace_function); 6125 6126 static int __init set_graph_max_depth_function(char *str) 6127 { 6128 if (!str) 6129 return 0; 6130 fgraph_max_depth = simple_strtoul(str, NULL, 0); 6131 return 1; 6132 } 6133 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function); 6134 6135 static void __init set_ftrace_early_graph(char *buf, int enable) 6136 { 6137 int ret; 6138 char *func; 6139 struct ftrace_hash *hash; 6140 6141 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 6142 if (MEM_FAIL(!hash, "Failed to allocate hash\n")) 6143 return; 6144 6145 while (buf) { 6146 func = strsep(&buf, ","); 6147 /* we allow only one expression at a time */ 6148 ret = ftrace_graph_set_hash(hash, func); 6149 if (ret) 6150 printk(KERN_DEBUG "ftrace: function %s not " 6151 "traceable\n", func); 6152 } 6153 6154 if (enable) 6155 ftrace_graph_hash = hash; 6156 else 6157 ftrace_graph_notrace_hash = hash; 6158 } 6159 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 6160 6161 void __init 6162 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable) 6163 { 6164 char *func; 6165 6166 ftrace_ops_init(ops); 6167 6168 while (buf) { 6169 func = strsep(&buf, ","); 6170 ftrace_set_regex(ops, func, strlen(func), 0, enable); 6171 } 6172 } 6173 6174 static void __init set_ftrace_early_filters(void) 6175 { 6176 if (ftrace_filter_buf[0]) 6177 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1); 6178 if (ftrace_notrace_buf[0]) 6179 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0); 6180 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 6181 if (ftrace_graph_buf[0]) 6182 set_ftrace_early_graph(ftrace_graph_buf, 1); 6183 if (ftrace_graph_notrace_buf[0]) 6184 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0); 6185 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 6186 } 6187 6188 int ftrace_regex_release(struct inode *inode, 
struct file *file) 6189 { 6190 struct seq_file *m = (struct seq_file *)file->private_data; 6191 struct ftrace_iterator *iter; 6192 struct ftrace_hash **orig_hash; 6193 struct trace_parser *parser; 6194 int filter_hash; 6195 6196 if (file->f_mode & FMODE_READ) { 6197 iter = m->private; 6198 seq_release(inode, file); 6199 } else 6200 iter = file->private_data; 6201 6202 parser = &iter->parser; 6203 if (trace_parser_loaded(parser)) { 6204 int enable = !(iter->flags & FTRACE_ITER_NOTRACE); 6205 6206 ftrace_process_regex(iter, parser->buffer, 6207 parser->idx, enable); 6208 } 6209 6210 trace_parser_put(parser); 6211 6212 mutex_lock(&iter->ops->func_hash->regex_lock); 6213 6214 if (file->f_mode & FMODE_WRITE) { 6215 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); 6216 6217 if (filter_hash) { 6218 orig_hash = &iter->ops->func_hash->filter_hash; 6219 if (iter->tr) { 6220 if (list_empty(&iter->tr->mod_trace)) 6221 iter->hash->flags &= ~FTRACE_HASH_FL_MOD; 6222 else 6223 iter->hash->flags |= FTRACE_HASH_FL_MOD; 6224 } 6225 } else 6226 orig_hash = &iter->ops->func_hash->notrace_hash; 6227 6228 mutex_lock(&ftrace_lock); 6229 ftrace_hash_move_and_update_ops(iter->ops, orig_hash, 6230 iter->hash, filter_hash); 6231 mutex_unlock(&ftrace_lock); 6232 } else { 6233 /* For read only, the hash is the ops hash */ 6234 iter->hash = NULL; 6235 } 6236 6237 mutex_unlock(&iter->ops->func_hash->regex_lock); 6238 free_ftrace_hash(iter->hash); 6239 if (iter->tr) 6240 trace_array_put(iter->tr); 6241 kfree(iter); 6242 6243 return 0; 6244 } 6245 6246 static const struct file_operations ftrace_avail_fops = { 6247 .open = ftrace_avail_open, 6248 .read = seq_read, 6249 .llseek = seq_lseek, 6250 .release = seq_release_private, 6251 }; 6252 6253 static const struct file_operations ftrace_enabled_fops = { 6254 .open = ftrace_enabled_open, 6255 .read = seq_read, 6256 .llseek = seq_lseek, 6257 .release = seq_release_private, 6258 }; 6259 6260 static const struct file_operations ftrace_filter_fops = { 6261 .open = ftrace_filter_open, 6262 .read = seq_read, 6263 .write = ftrace_filter_write, 6264 .llseek = tracing_lseek, 6265 .release = ftrace_regex_release, 6266 }; 6267 6268 static const struct file_operations ftrace_notrace_fops = { 6269 .open = ftrace_notrace_open, 6270 .read = seq_read, 6271 .write = ftrace_notrace_write, 6272 .llseek = tracing_lseek, 6273 .release = ftrace_regex_release, 6274 }; 6275 6276 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 6277 6278 static DEFINE_MUTEX(graph_lock); 6279 6280 struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH; 6281 struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH; 6282 6283 enum graph_filter_type { 6284 GRAPH_FILTER_NOTRACE = 0, 6285 GRAPH_FILTER_FUNCTION, 6286 }; 6287 6288 #define FTRACE_GRAPH_EMPTY ((void *)1) 6289 6290 struct ftrace_graph_data { 6291 struct ftrace_hash *hash; 6292 struct ftrace_func_entry *entry; 6293 int idx; /* for hash table iteration */ 6294 enum graph_filter_type type; 6295 struct ftrace_hash *new_hash; 6296 const struct seq_operations *seq_ops; 6297 struct trace_parser parser; 6298 }; 6299 6300 static void * 6301 __g_next(struct seq_file *m, loff_t *pos) 6302 { 6303 struct ftrace_graph_data *fgd = m->private; 6304 struct ftrace_func_entry *entry = fgd->entry; 6305 struct hlist_head *head; 6306 int i, idx = fgd->idx; 6307 6308 if (*pos >= fgd->hash->count) 6309 return NULL; 6310 6311 if (entry) { 6312 hlist_for_each_entry_continue(entry, hlist) { 6313 fgd->entry = entry; 6314 return entry; 6315 } 6316 6317 idx++; 6318 } 6319 6320 for (i = 
idx; i < 1 << fgd->hash->size_bits; i++) { 6321 head = &fgd->hash->buckets[i]; 6322 hlist_for_each_entry(entry, head, hlist) { 6323 fgd->entry = entry; 6324 fgd->idx = i; 6325 return entry; 6326 } 6327 } 6328 return NULL; 6329 } 6330 6331 static void * 6332 g_next(struct seq_file *m, void *v, loff_t *pos) 6333 { 6334 (*pos)++; 6335 return __g_next(m, pos); 6336 } 6337 6338 static void *g_start(struct seq_file *m, loff_t *pos) 6339 { 6340 struct ftrace_graph_data *fgd = m->private; 6341 6342 mutex_lock(&graph_lock); 6343 6344 if (fgd->type == GRAPH_FILTER_FUNCTION) 6345 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, 6346 lockdep_is_held(&graph_lock)); 6347 else 6348 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 6349 lockdep_is_held(&graph_lock)); 6350 6351 /* Nothing, tell g_show to print all functions are enabled */ 6352 if (ftrace_hash_empty(fgd->hash) && !*pos) 6353 return FTRACE_GRAPH_EMPTY; 6354 6355 fgd->idx = 0; 6356 fgd->entry = NULL; 6357 return __g_next(m, pos); 6358 } 6359 6360 static void g_stop(struct seq_file *m, void *p) 6361 { 6362 mutex_unlock(&graph_lock); 6363 } 6364 6365 static int g_show(struct seq_file *m, void *v) 6366 { 6367 struct ftrace_func_entry *entry = v; 6368 6369 if (!entry) 6370 return 0; 6371 6372 if (entry == FTRACE_GRAPH_EMPTY) { 6373 struct ftrace_graph_data *fgd = m->private; 6374 6375 if (fgd->type == GRAPH_FILTER_FUNCTION) 6376 seq_puts(m, "#### all functions enabled ####\n"); 6377 else 6378 seq_puts(m, "#### no functions disabled ####\n"); 6379 return 0; 6380 } 6381 6382 seq_printf(m, "%ps\n", (void *)entry->ip); 6383 6384 return 0; 6385 } 6386 6387 static const struct seq_operations ftrace_graph_seq_ops = { 6388 .start = g_start, 6389 .next = g_next, 6390 .stop = g_stop, 6391 .show = g_show, 6392 }; 6393 6394 static int 6395 __ftrace_graph_open(struct inode *inode, struct file *file, 6396 struct ftrace_graph_data *fgd) 6397 { 6398 int ret; 6399 struct ftrace_hash *new_hash = NULL; 6400 6401 ret = security_locked_down(LOCKDOWN_TRACEFS); 6402 if (ret) 6403 return ret; 6404 6405 if (file->f_mode & FMODE_WRITE) { 6406 const int size_bits = FTRACE_HASH_DEFAULT_BITS; 6407 6408 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX)) 6409 return -ENOMEM; 6410 6411 if (file->f_flags & O_TRUNC) 6412 new_hash = alloc_ftrace_hash(size_bits); 6413 else 6414 new_hash = alloc_and_copy_ftrace_hash(size_bits, 6415 fgd->hash); 6416 if (!new_hash) { 6417 ret = -ENOMEM; 6418 goto out; 6419 } 6420 } 6421 6422 if (file->f_mode & FMODE_READ) { 6423 ret = seq_open(file, &ftrace_graph_seq_ops); 6424 if (!ret) { 6425 struct seq_file *m = file->private_data; 6426 m->private = fgd; 6427 } else { 6428 /* Failed */ 6429 free_ftrace_hash(new_hash); 6430 new_hash = NULL; 6431 } 6432 } else 6433 file->private_data = fgd; 6434 6435 out: 6436 if (ret < 0 && file->f_mode & FMODE_WRITE) 6437 trace_parser_put(&fgd->parser); 6438 6439 fgd->new_hash = new_hash; 6440 6441 /* 6442 * All uses of fgd->hash must be taken with the graph_lock 6443 * held. The graph_lock is going to be released, so force 6444 * fgd->hash to be reinitialized when it is taken again. 
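 * (g_start() does exactly that: it re-dereferences ftrace_graph_hash or
 * ftrace_graph_notrace_hash under graph_lock on every new traversal, so a
 * pointer cached across an unlock here would be stale.)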
6445 */ 6446 fgd->hash = NULL; 6447 6448 return ret; 6449 } 6450 6451 static int 6452 ftrace_graph_open(struct inode *inode, struct file *file) 6453 { 6454 struct ftrace_graph_data *fgd; 6455 int ret; 6456 6457 if (unlikely(ftrace_disabled)) 6458 return -ENODEV; 6459 6460 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 6461 if (fgd == NULL) 6462 return -ENOMEM; 6463 6464 mutex_lock(&graph_lock); 6465 6466 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, 6467 lockdep_is_held(&graph_lock)); 6468 fgd->type = GRAPH_FILTER_FUNCTION; 6469 fgd->seq_ops = &ftrace_graph_seq_ops; 6470 6471 ret = __ftrace_graph_open(inode, file, fgd); 6472 if (ret < 0) 6473 kfree(fgd); 6474 6475 mutex_unlock(&graph_lock); 6476 return ret; 6477 } 6478 6479 static int 6480 ftrace_graph_notrace_open(struct inode *inode, struct file *file) 6481 { 6482 struct ftrace_graph_data *fgd; 6483 int ret; 6484 6485 if (unlikely(ftrace_disabled)) 6486 return -ENODEV; 6487 6488 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 6489 if (fgd == NULL) 6490 return -ENOMEM; 6491 6492 mutex_lock(&graph_lock); 6493 6494 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 6495 lockdep_is_held(&graph_lock)); 6496 fgd->type = GRAPH_FILTER_NOTRACE; 6497 fgd->seq_ops = &ftrace_graph_seq_ops; 6498 6499 ret = __ftrace_graph_open(inode, file, fgd); 6500 if (ret < 0) 6501 kfree(fgd); 6502 6503 mutex_unlock(&graph_lock); 6504 return ret; 6505 } 6506 6507 static int 6508 ftrace_graph_release(struct inode *inode, struct file *file) 6509 { 6510 struct ftrace_graph_data *fgd; 6511 struct ftrace_hash *old_hash, *new_hash; 6512 struct trace_parser *parser; 6513 int ret = 0; 6514 6515 if (file->f_mode & FMODE_READ) { 6516 struct seq_file *m = file->private_data; 6517 6518 fgd = m->private; 6519 seq_release(inode, file); 6520 } else { 6521 fgd = file->private_data; 6522 } 6523 6524 6525 if (file->f_mode & FMODE_WRITE) { 6526 6527 parser = &fgd->parser; 6528 6529 if (trace_parser_loaded((parser))) { 6530 ret = ftrace_graph_set_hash(fgd->new_hash, 6531 parser->buffer); 6532 } 6533 6534 trace_parser_put(parser); 6535 6536 new_hash = __ftrace_hash_move(fgd->new_hash); 6537 if (!new_hash) { 6538 ret = -ENOMEM; 6539 goto out; 6540 } 6541 6542 mutex_lock(&graph_lock); 6543 6544 if (fgd->type == GRAPH_FILTER_FUNCTION) { 6545 old_hash = rcu_dereference_protected(ftrace_graph_hash, 6546 lockdep_is_held(&graph_lock)); 6547 rcu_assign_pointer(ftrace_graph_hash, new_hash); 6548 } else { 6549 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 6550 lockdep_is_held(&graph_lock)); 6551 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash); 6552 } 6553 6554 mutex_unlock(&graph_lock); 6555 6556 /* 6557 * We need to do a hard force of sched synchronization. 6558 * This is because we use preempt_disable() to do RCU, but 6559 * the function tracers can be called where RCU is not watching 6560 * (like before user_exit()). We can not rely on the RCU 6561 * infrastructure to do the synchronization, thus we must do it 6562 * ourselves. 
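 * (That is what the synchronize_rcu_tasks_rude() call below provides: it
 * forces a context switch on every CPU, so no CPU can still be running a
 * tracer callback that sees the old hash, even where RCU is not watching.)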
6563 */ 6564 if (old_hash != EMPTY_HASH) 6565 synchronize_rcu_tasks_rude(); 6566 6567 free_ftrace_hash(old_hash); 6568 } 6569 6570 out: 6571 free_ftrace_hash(fgd->new_hash); 6572 kfree(fgd); 6573 6574 return ret; 6575 } 6576 6577 static int 6578 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer) 6579 { 6580 struct ftrace_glob func_g; 6581 struct dyn_ftrace *rec; 6582 struct ftrace_page *pg; 6583 struct ftrace_func_entry *entry; 6584 int fail = 1; 6585 int not; 6586 6587 /* decode regex */ 6588 func_g.type = filter_parse_regex(buffer, strlen(buffer), 6589 &func_g.search, ¬); 6590 6591 func_g.len = strlen(func_g.search); 6592 6593 mutex_lock(&ftrace_lock); 6594 6595 if (unlikely(ftrace_disabled)) { 6596 mutex_unlock(&ftrace_lock); 6597 return -ENODEV; 6598 } 6599 6600 do_for_each_ftrace_rec(pg, rec) { 6601 6602 if (rec->flags & FTRACE_FL_DISABLED) 6603 continue; 6604 6605 if (ftrace_match_record(rec, &func_g, NULL, 0)) { 6606 entry = ftrace_lookup_ip(hash, rec->ip); 6607 6608 if (!not) { 6609 fail = 0; 6610 6611 if (entry) 6612 continue; 6613 if (add_hash_entry(hash, rec->ip) < 0) 6614 goto out; 6615 } else { 6616 if (entry) { 6617 free_hash_entry(hash, entry); 6618 fail = 0; 6619 } 6620 } 6621 } 6622 } while_for_each_ftrace_rec(); 6623 out: 6624 mutex_unlock(&ftrace_lock); 6625 6626 if (fail) 6627 return -EINVAL; 6628 6629 return 0; 6630 } 6631 6632 static ssize_t 6633 ftrace_graph_write(struct file *file, const char __user *ubuf, 6634 size_t cnt, loff_t *ppos) 6635 { 6636 ssize_t read, ret = 0; 6637 struct ftrace_graph_data *fgd = file->private_data; 6638 struct trace_parser *parser; 6639 6640 if (!cnt) 6641 return 0; 6642 6643 /* Read mode uses seq functions */ 6644 if (file->f_mode & FMODE_READ) { 6645 struct seq_file *m = file->private_data; 6646 fgd = m->private; 6647 } 6648 6649 parser = &fgd->parser; 6650 6651 read = trace_get_user(parser, ubuf, cnt, ppos); 6652 6653 if (read >= 0 && trace_parser_loaded(parser) && 6654 !trace_parser_cont(parser)) { 6655 6656 ret = ftrace_graph_set_hash(fgd->new_hash, 6657 parser->buffer); 6658 trace_parser_clear(parser); 6659 } 6660 6661 if (!ret) 6662 ret = read; 6663 6664 return ret; 6665 } 6666 6667 static const struct file_operations ftrace_graph_fops = { 6668 .open = ftrace_graph_open, 6669 .read = seq_read, 6670 .write = ftrace_graph_write, 6671 .llseek = tracing_lseek, 6672 .release = ftrace_graph_release, 6673 }; 6674 6675 static const struct file_operations ftrace_graph_notrace_fops = { 6676 .open = ftrace_graph_notrace_open, 6677 .read = seq_read, 6678 .write = ftrace_graph_write, 6679 .llseek = tracing_lseek, 6680 .release = ftrace_graph_release, 6681 }; 6682 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 6683 6684 void ftrace_create_filter_files(struct ftrace_ops *ops, 6685 struct dentry *parent) 6686 { 6687 6688 trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent, 6689 ops, &ftrace_filter_fops); 6690 6691 trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent, 6692 ops, &ftrace_notrace_fops); 6693 } 6694 6695 /* 6696 * The name "destroy_filter_files" is really a misnomer. Although 6697 * in the future, it may actually delete the files, but this is 6698 * really intended to make sure the ops passed in are disabled 6699 * and that when this function returns, the caller is free to 6700 * free the ops. 6701 * 6702 * The "destroy" name is only to match the "create" name that this 6703 * should be paired with. 
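 * (I.e. every ftrace_create_filter_files(ops, parent) should eventually be
 * followed by ftrace_destroy_filter_files(ops) before the ops is freed.)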
 */
void ftrace_destroy_filter_files(struct ftrace_ops *ops)
{
	mutex_lock(&ftrace_lock);
	if (ops->flags & FTRACE_OPS_FL_ENABLED)
		ftrace_shutdown(ops, 0);
	ops->flags |= FTRACE_OPS_FL_DELETED;
	ftrace_free_filter(ops);
	mutex_unlock(&ftrace_lock);
}

static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
{

	trace_create_file("available_filter_functions", TRACE_MODE_READ,
			d_tracer, NULL, &ftrace_avail_fops);

	trace_create_file("enabled_functions", TRACE_MODE_READ,
			d_tracer, NULL, &ftrace_enabled_fops);

	ftrace_create_filter_files(&global_ops, d_tracer);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer,
			NULL,
			&ftrace_graph_fops);
	trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer,
			NULL,
			&ftrace_graph_notrace_fops);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}

static int ftrace_cmp_ips(const void *a, const void *b)
{
	const unsigned long *ipa = a;
	const unsigned long *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

#ifdef CONFIG_FTRACE_SORT_STARTUP_TEST
static void test_is_sorted(unsigned long *start, unsigned long count)
{
	int i;

	for (i = 1; i < count; i++) {
		if (WARN(start[i - 1] > start[i],
			 "[%d] %pS at %lx is not sorted with %pS at %lx\n", i,
			 (void *)start[i - 1], start[i - 1],
			 (void *)start[i], start[i]))
			break;
	}
	if (i == count)
		pr_info("ftrace section at %px sorted properly\n", start);
}
#else
static void test_is_sorted(unsigned long *start, unsigned long count)
{
}
#endif

static int ftrace_process_locs(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long count;
	unsigned long *p;
	unsigned long addr;
	unsigned long flags = 0; /* Shut up gcc */
	int ret = -ENOMEM;

	count = end - start;

	if (!count)
		return 0;

	/*
	 * Sorting the mcount locations in vmlinux at build time depends on
	 * CONFIG_BUILDTIME_MCOUNT_SORT, while the mcount_loc entries of
	 * modules cannot be sorted at build time.
	 */
	if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) {
		sort(start, count, sizeof(*start),
		     ftrace_cmp_ips, NULL);
	} else {
		test_is_sorted(start, count);
	}

	start_pg = ftrace_allocate_pages(count);
	if (!start_pg)
		return -ENOMEM;

	mutex_lock(&ftrace_lock);

	/*
	 * The core kernel and each module need their own pages, as
	 * modules will free them when they are removed.
	 * Force a new page to be allocated for modules.
	 */
	if (!mod) {
		WARN_ON(ftrace_pages || ftrace_pages_start);
		/* First initialization */
		ftrace_pages = ftrace_pages_start = start_pg;
	} else {
		if (!ftrace_pages)
			goto out;

		if (WARN_ON(ftrace_pages->next)) {
			/* Hmm, we have free pages?
*/ 6822 while (ftrace_pages->next) 6823 ftrace_pages = ftrace_pages->next; 6824 } 6825 6826 ftrace_pages->next = start_pg; 6827 } 6828 6829 p = start; 6830 pg = start_pg; 6831 while (p < end) { 6832 unsigned long end_offset; 6833 addr = ftrace_call_adjust(*p++); 6834 /* 6835 * Some architecture linkers will pad between 6836 * the different mcount_loc sections of different 6837 * object files to satisfy alignments. 6838 * Skip any NULL pointers. 6839 */ 6840 if (!addr) 6841 continue; 6842 6843 end_offset = (pg->index+1) * sizeof(pg->records[0]); 6844 if (end_offset > PAGE_SIZE << pg->order) { 6845 /* We should have allocated enough */ 6846 if (WARN_ON(!pg->next)) 6847 break; 6848 pg = pg->next; 6849 } 6850 6851 rec = &pg->records[pg->index++]; 6852 rec->ip = addr; 6853 } 6854 6855 /* We should have used all pages */ 6856 WARN_ON(pg->next); 6857 6858 /* Assign the last page to ftrace_pages */ 6859 ftrace_pages = pg; 6860 6861 /* 6862 * We only need to disable interrupts on start up 6863 * because we are modifying code that an interrupt 6864 * may execute, and the modification is not atomic. 6865 * But for modules, nothing runs the code we modify 6866 * until we are finished with it, and there's no 6867 * reason to cause large interrupt latencies while we do it. 6868 */ 6869 if (!mod) 6870 local_irq_save(flags); 6871 ftrace_update_code(mod, start_pg); 6872 if (!mod) 6873 local_irq_restore(flags); 6874 ret = 0; 6875 out: 6876 mutex_unlock(&ftrace_lock); 6877 6878 return ret; 6879 } 6880 6881 struct ftrace_mod_func { 6882 struct list_head list; 6883 char *name; 6884 unsigned long ip; 6885 unsigned int size; 6886 }; 6887 6888 struct ftrace_mod_map { 6889 struct rcu_head rcu; 6890 struct list_head list; 6891 struct module *mod; 6892 unsigned long start_addr; 6893 unsigned long end_addr; 6894 struct list_head funcs; 6895 unsigned int num_funcs; 6896 }; 6897 6898 static int ftrace_get_trampoline_kallsym(unsigned int symnum, 6899 unsigned long *value, char *type, 6900 char *name, char *module_name, 6901 int *exported) 6902 { 6903 struct ftrace_ops *op; 6904 6905 list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) { 6906 if (!op->trampoline || symnum--) 6907 continue; 6908 *value = op->trampoline; 6909 *type = 't'; 6910 strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN); 6911 strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN); 6912 *exported = 0; 6913 return 0; 6914 } 6915 6916 return -ERANGE; 6917 } 6918 6919 #if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES) 6920 /* 6921 * Check if the current ops references the given ip. 6922 * 6923 * If the ops traces all functions, then it was already accounted for. 6924 * If the ops does not trace the current record function, skip it. 6925 * If the ops ignores the function via notrace filter, skip it. 
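 * In short: the ops must be enabled, the ip must pass the filter_hash
 * (an empty filter passes everything), and the ip must not be in the
 * notrace_hash.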
6926 */ 6927 static bool 6928 ops_references_ip(struct ftrace_ops *ops, unsigned long ip) 6929 { 6930 /* If ops isn't enabled, ignore it */ 6931 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) 6932 return false; 6933 6934 /* If ops traces all then it includes this function */ 6935 if (ops_traces_mod(ops)) 6936 return true; 6937 6938 /* The function must be in the filter */ 6939 if (!ftrace_hash_empty(ops->func_hash->filter_hash) && 6940 !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip)) 6941 return false; 6942 6943 /* If in notrace hash, we ignore it too */ 6944 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip)) 6945 return false; 6946 6947 return true; 6948 } 6949 #endif 6950 6951 #ifdef CONFIG_MODULES 6952 6953 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) 6954 6955 static LIST_HEAD(ftrace_mod_maps); 6956 6957 static int referenced_filters(struct dyn_ftrace *rec) 6958 { 6959 struct ftrace_ops *ops; 6960 int cnt = 0; 6961 6962 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { 6963 if (ops_references_ip(ops, rec->ip)) { 6964 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT)) 6965 continue; 6966 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY)) 6967 continue; 6968 cnt++; 6969 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) 6970 rec->flags |= FTRACE_FL_REGS; 6971 if (cnt == 1 && ops->trampoline) 6972 rec->flags |= FTRACE_FL_TRAMP; 6973 else 6974 rec->flags &= ~FTRACE_FL_TRAMP; 6975 } 6976 } 6977 6978 return cnt; 6979 } 6980 6981 static void 6982 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash) 6983 { 6984 struct ftrace_func_entry *entry; 6985 struct dyn_ftrace *rec; 6986 int i; 6987 6988 if (ftrace_hash_empty(hash)) 6989 return; 6990 6991 for (i = 0; i < pg->index; i++) { 6992 rec = &pg->records[i]; 6993 entry = __ftrace_lookup_ip(hash, rec->ip); 6994 /* 6995 * Do not allow this rec to match again. 6996 * Yeah, it may waste some memory, but will be removed 6997 * if/when the hash is modified again. 
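 * (Zeroing entry->ip below keeps the entry allocated but makes it
 * unmatchable, since no record has an ip of 0.)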
6998 */ 6999 if (entry) 7000 entry->ip = 0; 7001 } 7002 } 7003 7004 /* Clear any records from hashes */ 7005 static void clear_mod_from_hashes(struct ftrace_page *pg) 7006 { 7007 struct trace_array *tr; 7008 7009 mutex_lock(&trace_types_lock); 7010 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 7011 if (!tr->ops || !tr->ops->func_hash) 7012 continue; 7013 mutex_lock(&tr->ops->func_hash->regex_lock); 7014 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash); 7015 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash); 7016 mutex_unlock(&tr->ops->func_hash->regex_lock); 7017 } 7018 mutex_unlock(&trace_types_lock); 7019 } 7020 7021 static void ftrace_free_mod_map(struct rcu_head *rcu) 7022 { 7023 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu); 7024 struct ftrace_mod_func *mod_func; 7025 struct ftrace_mod_func *n; 7026 7027 /* All the contents of mod_map are now not visible to readers */ 7028 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) { 7029 kfree(mod_func->name); 7030 list_del(&mod_func->list); 7031 kfree(mod_func); 7032 } 7033 7034 kfree(mod_map); 7035 } 7036 7037 void ftrace_release_mod(struct module *mod) 7038 { 7039 struct ftrace_mod_map *mod_map; 7040 struct ftrace_mod_map *n; 7041 struct dyn_ftrace *rec; 7042 struct ftrace_page **last_pg; 7043 struct ftrace_page *tmp_page = NULL; 7044 struct ftrace_page *pg; 7045 7046 mutex_lock(&ftrace_lock); 7047 7048 if (ftrace_disabled) 7049 goto out_unlock; 7050 7051 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { 7052 if (mod_map->mod == mod) { 7053 list_del_rcu(&mod_map->list); 7054 call_rcu(&mod_map->rcu, ftrace_free_mod_map); 7055 break; 7056 } 7057 } 7058 7059 /* 7060 * Each module has its own ftrace_pages, remove 7061 * them from the list. 7062 */ 7063 last_pg = &ftrace_pages_start; 7064 for (pg = ftrace_pages_start; pg; pg = *last_pg) { 7065 rec = &pg->records[0]; 7066 if (within_module_core(rec->ip, mod) || 7067 within_module_init(rec->ip, mod)) { 7068 /* 7069 * As core pages are first, the first 7070 * page should never be a module page. 7071 */ 7072 if (WARN_ON(pg == ftrace_pages_start)) 7073 goto out_unlock; 7074 7075 /* Check if we are deleting the last page */ 7076 if (pg == ftrace_pages) 7077 ftrace_pages = next_to_ftrace_page(last_pg); 7078 7079 ftrace_update_tot_cnt -= pg->index; 7080 *last_pg = pg->next; 7081 7082 pg->next = tmp_page; 7083 tmp_page = pg; 7084 } else 7085 last_pg = &pg->next; 7086 } 7087 out_unlock: 7088 mutex_unlock(&ftrace_lock); 7089 7090 for (pg = tmp_page; pg; pg = tmp_page) { 7091 7092 /* Needs to be called outside of ftrace_lock */ 7093 clear_mod_from_hashes(pg); 7094 7095 if (pg->records) { 7096 free_pages((unsigned long)pg->records, pg->order); 7097 ftrace_number_of_pages -= 1 << pg->order; 7098 } 7099 tmp_page = pg->next; 7100 kfree(pg); 7101 ftrace_number_of_groups--; 7102 } 7103 } 7104 7105 void ftrace_module_enable(struct module *mod) 7106 { 7107 struct dyn_ftrace *rec; 7108 struct ftrace_page *pg; 7109 7110 mutex_lock(&ftrace_lock); 7111 7112 if (ftrace_disabled) 7113 goto out_unlock; 7114 7115 /* 7116 * If the tracing is enabled, go ahead and enable the record. 7117 * 7118 * The reason not to enable the record immediately is the 7119 * inherent check of ftrace_make_nop/ftrace_make_call for 7120 * correct previous instructions. Making first the NOP 7121 * conversion puts the module to the correct state, thus 7122 * passing the ftrace_make_call check. 
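 * (The initial conversion to NOPs happened in ftrace_module_init(),
 * via ftrace_process_locs(), while the module was still being set up.)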
7123 * 7124 * We also delay this to after the module code already set the 7125 * text to read-only, as we now need to set it back to read-write 7126 * so that we can modify the text. 7127 */ 7128 if (ftrace_start_up) 7129 ftrace_arch_code_modify_prepare(); 7130 7131 do_for_each_ftrace_rec(pg, rec) { 7132 int cnt; 7133 /* 7134 * do_for_each_ftrace_rec() is a double loop. 7135 * module text shares the pg. If a record is 7136 * not part of this module, then skip this pg, 7137 * which the "break" will do. 7138 */ 7139 if (!within_module_core(rec->ip, mod) && 7140 !within_module_init(rec->ip, mod)) 7141 break; 7142 7143 /* Weak functions should still be ignored */ 7144 if (!test_for_valid_rec(rec)) { 7145 /* Clear all other flags. Should not be enabled anyway */ 7146 rec->flags = FTRACE_FL_DISABLED; 7147 continue; 7148 } 7149 7150 cnt = 0; 7151 7152 /* 7153 * When adding a module, we need to check if tracers are 7154 * currently enabled and if they are, and can trace this record, 7155 * we need to enable the module functions as well as update the 7156 * reference counts for those function records. 7157 */ 7158 if (ftrace_start_up) 7159 cnt += referenced_filters(rec); 7160 7161 rec->flags &= ~FTRACE_FL_DISABLED; 7162 rec->flags += cnt; 7163 7164 if (ftrace_start_up && cnt) { 7165 int failed = __ftrace_replace_code(rec, 1); 7166 if (failed) { 7167 ftrace_bug(failed, rec); 7168 goto out_loop; 7169 } 7170 } 7171 7172 } while_for_each_ftrace_rec(); 7173 7174 out_loop: 7175 if (ftrace_start_up) 7176 ftrace_arch_code_modify_post_process(); 7177 7178 out_unlock: 7179 mutex_unlock(&ftrace_lock); 7180 7181 process_cached_mods(mod->name); 7182 } 7183 7184 void ftrace_module_init(struct module *mod) 7185 { 7186 int ret; 7187 7188 if (ftrace_disabled || !mod->num_ftrace_callsites) 7189 return; 7190 7191 ret = ftrace_process_locs(mod, mod->ftrace_callsites, 7192 mod->ftrace_callsites + mod->num_ftrace_callsites); 7193 if (ret) 7194 pr_warn("ftrace: failed to allocate entries for module '%s' functions\n", 7195 mod->name); 7196 } 7197 7198 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, 7199 struct dyn_ftrace *rec) 7200 { 7201 struct ftrace_mod_func *mod_func; 7202 unsigned long symsize; 7203 unsigned long offset; 7204 char str[KSYM_SYMBOL_LEN]; 7205 char *modname; 7206 const char *ret; 7207 7208 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str); 7209 if (!ret) 7210 return; 7211 7212 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL); 7213 if (!mod_func) 7214 return; 7215 7216 mod_func->name = kstrdup(str, GFP_KERNEL); 7217 if (!mod_func->name) { 7218 kfree(mod_func); 7219 return; 7220 } 7221 7222 mod_func->ip = rec->ip - offset; 7223 mod_func->size = symsize; 7224 7225 mod_map->num_funcs++; 7226 7227 list_add_rcu(&mod_func->list, &mod_map->funcs); 7228 } 7229 7230 static struct ftrace_mod_map * 7231 allocate_ftrace_mod_map(struct module *mod, 7232 unsigned long start, unsigned long end) 7233 { 7234 struct ftrace_mod_map *mod_map; 7235 7236 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL); 7237 if (!mod_map) 7238 return NULL; 7239 7240 mod_map->mod = mod; 7241 mod_map->start_addr = start; 7242 mod_map->end_addr = end; 7243 mod_map->num_funcs = 0; 7244 7245 INIT_LIST_HEAD_RCU(&mod_map->funcs); 7246 7247 list_add_rcu(&mod_map->list, &ftrace_mod_maps); 7248 7249 return mod_map; 7250 } 7251 7252 static const char * 7253 ftrace_func_address_lookup(struct ftrace_mod_map *mod_map, 7254 unsigned long addr, unsigned long *size, 7255 unsigned long *off, char *sym) 7256 { 7257 struct 
ftrace_mod_func *found_func = NULL; 7258 struct ftrace_mod_func *mod_func; 7259 7260 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { 7261 if (addr >= mod_func->ip && 7262 addr < mod_func->ip + mod_func->size) { 7263 found_func = mod_func; 7264 break; 7265 } 7266 } 7267 7268 if (found_func) { 7269 if (size) 7270 *size = found_func->size; 7271 if (off) 7272 *off = addr - found_func->ip; 7273 if (sym) 7274 strlcpy(sym, found_func->name, KSYM_NAME_LEN); 7275 7276 return found_func->name; 7277 } 7278 7279 return NULL; 7280 } 7281 7282 const char * 7283 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, 7284 unsigned long *off, char **modname, char *sym) 7285 { 7286 struct ftrace_mod_map *mod_map; 7287 const char *ret = NULL; 7288 7289 /* mod_map is freed via call_rcu() */ 7290 preempt_disable(); 7291 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { 7292 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); 7293 if (ret) { 7294 if (modname) 7295 *modname = mod_map->mod->name; 7296 break; 7297 } 7298 } 7299 preempt_enable(); 7300 7301 return ret; 7302 } 7303 7304 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, 7305 char *type, char *name, 7306 char *module_name, int *exported) 7307 { 7308 struct ftrace_mod_map *mod_map; 7309 struct ftrace_mod_func *mod_func; 7310 int ret; 7311 7312 preempt_disable(); 7313 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { 7314 7315 if (symnum >= mod_map->num_funcs) { 7316 symnum -= mod_map->num_funcs; 7317 continue; 7318 } 7319 7320 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { 7321 if (symnum > 1) { 7322 symnum--; 7323 continue; 7324 } 7325 7326 *value = mod_func->ip; 7327 *type = 'T'; 7328 strlcpy(name, mod_func->name, KSYM_NAME_LEN); 7329 strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN); 7330 *exported = 1; 7331 preempt_enable(); 7332 return 0; 7333 } 7334 WARN_ON(1); 7335 break; 7336 } 7337 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, 7338 module_name, exported); 7339 preempt_enable(); 7340 return ret; 7341 } 7342 7343 #else 7344 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, 7345 struct dyn_ftrace *rec) { } 7346 static inline struct ftrace_mod_map * 7347 allocate_ftrace_mod_map(struct module *mod, 7348 unsigned long start, unsigned long end) 7349 { 7350 return NULL; 7351 } 7352 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, 7353 char *type, char *name, char *module_name, 7354 int *exported) 7355 { 7356 int ret; 7357 7358 preempt_disable(); 7359 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, 7360 module_name, exported); 7361 preempt_enable(); 7362 return ret; 7363 } 7364 #endif /* CONFIG_MODULES */ 7365 7366 struct ftrace_init_func { 7367 struct list_head list; 7368 unsigned long ip; 7369 }; 7370 7371 /* Clear any init ips from hashes */ 7372 static void 7373 clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash) 7374 { 7375 struct ftrace_func_entry *entry; 7376 7377 entry = ftrace_lookup_ip(hash, func->ip); 7378 /* 7379 * Do not allow this rec to match again. 7380 * Yeah, it may waste some memory, but will be removed 7381 * if/when the hash is modified again. 
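 * (Same trick as clear_mod_from_hash() above: zero the saved ip rather
 * than reallocate the hash.)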
7382 */ 7383 if (entry) 7384 entry->ip = 0; 7385 } 7386 7387 static void 7388 clear_func_from_hashes(struct ftrace_init_func *func) 7389 { 7390 struct trace_array *tr; 7391 7392 mutex_lock(&trace_types_lock); 7393 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 7394 if (!tr->ops || !tr->ops->func_hash) 7395 continue; 7396 mutex_lock(&tr->ops->func_hash->regex_lock); 7397 clear_func_from_hash(func, tr->ops->func_hash->filter_hash); 7398 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash); 7399 mutex_unlock(&tr->ops->func_hash->regex_lock); 7400 } 7401 mutex_unlock(&trace_types_lock); 7402 } 7403 7404 static void add_to_clear_hash_list(struct list_head *clear_list, 7405 struct dyn_ftrace *rec) 7406 { 7407 struct ftrace_init_func *func; 7408 7409 func = kmalloc(sizeof(*func), GFP_KERNEL); 7410 if (!func) { 7411 MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n"); 7412 return; 7413 } 7414 7415 func->ip = rec->ip; 7416 list_add(&func->list, clear_list); 7417 } 7418 7419 void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) 7420 { 7421 unsigned long start = (unsigned long)(start_ptr); 7422 unsigned long end = (unsigned long)(end_ptr); 7423 struct ftrace_page **last_pg = &ftrace_pages_start; 7424 struct ftrace_page *pg; 7425 struct dyn_ftrace *rec; 7426 struct dyn_ftrace key; 7427 struct ftrace_mod_map *mod_map = NULL; 7428 struct ftrace_init_func *func, *func_next; 7429 struct list_head clear_hash; 7430 7431 INIT_LIST_HEAD(&clear_hash); 7432 7433 key.ip = start; 7434 key.flags = end; /* overload flags, as it is unsigned long */ 7435 7436 mutex_lock(&ftrace_lock); 7437 7438 /* 7439 * If we are freeing module init memory, then check if 7440 * any tracer is active. If so, we need to save a mapping of 7441 * the module functions being freed with the address. 
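 * That mapping (struct ftrace_mod_map) is what lets kallsyms keep
 * resolving symbols in already-freed module init text; see
 * ftrace_mod_address_lookup().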
7442 */ 7443 if (mod && ftrace_ops_list != &ftrace_list_end) 7444 mod_map = allocate_ftrace_mod_map(mod, start, end); 7445 7446 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { 7447 if (end < pg->records[0].ip || 7448 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) 7449 continue; 7450 again: 7451 rec = bsearch(&key, pg->records, pg->index, 7452 sizeof(struct dyn_ftrace), 7453 ftrace_cmp_recs); 7454 if (!rec) 7455 continue; 7456 7457 /* rec will be cleared from hashes after ftrace_lock unlock */ 7458 add_to_clear_hash_list(&clear_hash, rec); 7459 7460 if (mod_map) 7461 save_ftrace_mod_rec(mod_map, rec); 7462 7463 pg->index--; 7464 ftrace_update_tot_cnt--; 7465 if (!pg->index) { 7466 *last_pg = pg->next; 7467 if (pg->records) { 7468 free_pages((unsigned long)pg->records, pg->order); 7469 ftrace_number_of_pages -= 1 << pg->order; 7470 } 7471 ftrace_number_of_groups--; 7472 kfree(pg); 7473 pg = container_of(last_pg, struct ftrace_page, next); 7474 if (!(*last_pg)) 7475 ftrace_pages = pg; 7476 continue; 7477 } 7478 memmove(rec, rec + 1, 7479 (pg->index - (rec - pg->records)) * sizeof(*rec)); 7480 /* More than one function may be in this block */ 7481 goto again; 7482 } 7483 mutex_unlock(&ftrace_lock); 7484 7485 list_for_each_entry_safe(func, func_next, &clear_hash, list) { 7486 clear_func_from_hashes(func); 7487 kfree(func); 7488 } 7489 } 7490 7491 void __init ftrace_free_init_mem(void) 7492 { 7493 void *start = (void *)(&__init_begin); 7494 void *end = (void *)(&__init_end); 7495 7496 ftrace_boot_snapshot(); 7497 7498 ftrace_free_mem(NULL, start, end); 7499 } 7500 7501 int __init __weak ftrace_dyn_arch_init(void) 7502 { 7503 return 0; 7504 } 7505 7506 void __init ftrace_init(void) 7507 { 7508 extern unsigned long __start_mcount_loc[]; 7509 extern unsigned long __stop_mcount_loc[]; 7510 unsigned long count, flags; 7511 int ret; 7512 7513 local_irq_save(flags); 7514 ret = ftrace_dyn_arch_init(); 7515 local_irq_restore(flags); 7516 if (ret) 7517 goto failed; 7518 7519 count = __stop_mcount_loc - __start_mcount_loc; 7520 if (!count) { 7521 pr_info("ftrace: No functions to be traced?\n"); 7522 goto failed; 7523 } 7524 7525 pr_info("ftrace: allocating %ld entries in %ld pages\n", 7526 count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); 7527 7528 ret = ftrace_process_locs(NULL, 7529 __start_mcount_loc, 7530 __stop_mcount_loc); 7531 if (ret) { 7532 pr_warn("ftrace: failed to allocate entries for functions\n"); 7533 goto failed; 7534 } 7535 7536 pr_info("ftrace: allocated %ld pages with %ld groups\n", 7537 ftrace_number_of_pages, ftrace_number_of_groups); 7538 7539 last_ftrace_enabled = ftrace_enabled = 1; 7540 7541 set_ftrace_early_filters(); 7542 7543 return; 7544 failed: 7545 ftrace_disabled = 1; 7546 } 7547 7548 /* Do nothing if arch does not support this */ 7549 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) 7550 { 7551 } 7552 7553 static void ftrace_update_trampoline(struct ftrace_ops *ops) 7554 { 7555 unsigned long trampoline = ops->trampoline; 7556 7557 arch_ftrace_update_trampoline(ops); 7558 if (ops->trampoline && ops->trampoline != trampoline && 7559 (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) { 7560 /* Add to kallsyms before the perf events */ 7561 ftrace_add_trampoline_to_kallsyms(ops); 7562 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, 7563 ops->trampoline, ops->trampoline_size, false, 7564 FTRACE_TRAMPOLINE_SYM); 7565 /* 7566 * Record the perf text poke event after the ksymbol register 7567 * event. 
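 * (Otherwise a perf tool could see a text poke event for a symbol it
 * has not yet been told exists.)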
7568 */ 7569 perf_event_text_poke((void *)ops->trampoline, NULL, 0, 7570 (void *)ops->trampoline, 7571 ops->trampoline_size); 7572 } 7573 } 7574 7575 void ftrace_init_trace_array(struct trace_array *tr) 7576 { 7577 INIT_LIST_HEAD(&tr->func_probes); 7578 INIT_LIST_HEAD(&tr->mod_trace); 7579 INIT_LIST_HEAD(&tr->mod_notrace); 7580 } 7581 #else 7582 7583 struct ftrace_ops global_ops = { 7584 .func = ftrace_stub, 7585 .flags = FTRACE_OPS_FL_INITIALIZED | 7586 FTRACE_OPS_FL_PID, 7587 }; 7588 7589 static int __init ftrace_nodyn_init(void) 7590 { 7591 ftrace_enabled = 1; 7592 return 0; 7593 } 7594 core_initcall(ftrace_nodyn_init); 7595 7596 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; } 7597 static inline void ftrace_startup_all(int command) { } 7598 7599 static void ftrace_update_trampoline(struct ftrace_ops *ops) 7600 { 7601 } 7602 7603 #endif /* CONFIG_DYNAMIC_FTRACE */ 7604 7605 __init void ftrace_init_global_array_ops(struct trace_array *tr) 7606 { 7607 tr->ops = &global_ops; 7608 tr->ops->private = tr; 7609 ftrace_init_trace_array(tr); 7610 } 7611 7612 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) 7613 { 7614 /* If we filter on pids, update to use the pid function */ 7615 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { 7616 if (WARN_ON(tr->ops->func != ftrace_stub)) 7617 printk("ftrace ops had %pS for function\n", 7618 tr->ops->func); 7619 } 7620 tr->ops->func = func; 7621 tr->ops->private = tr; 7622 } 7623 7624 void ftrace_reset_array_ops(struct trace_array *tr) 7625 { 7626 tr->ops->func = ftrace_stub; 7627 } 7628 7629 static nokprobe_inline void 7630 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 7631 struct ftrace_ops *ignored, struct ftrace_regs *fregs) 7632 { 7633 struct pt_regs *regs = ftrace_get_regs(fregs); 7634 struct ftrace_ops *op; 7635 int bit; 7636 7637 /* 7638 * The ftrace_test_and_set_recursion() will disable preemption, 7639 * which is required since some of the ops may be dynamically 7640 * allocated, they must be freed after a synchronize_rcu(). 7641 */ 7642 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START); 7643 if (bit < 0) 7644 return; 7645 7646 do_for_each_ftrace_op(op, ftrace_ops_list) { 7647 /* Stub functions don't need to be called nor tested */ 7648 if (op->flags & FTRACE_OPS_FL_STUB) 7649 continue; 7650 /* 7651 * Check the following for each ops before calling their func: 7652 * if RCU flag is set, then rcu_is_watching() must be true 7653 * Otherwise test if the ip matches the ops filter 7654 * 7655 * If any of the above fails then the op->func() is not executed. 7656 */ 7657 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && 7658 ftrace_ops_test(op, ip, regs)) { 7659 if (FTRACE_WARN_ON(!op->func)) { 7660 pr_warn("op=%p %pS\n", op, op); 7661 goto out; 7662 } 7663 op->func(ip, parent_ip, op, fregs); 7664 } 7665 } while_for_each_ftrace_op(op); 7666 out: 7667 trace_clear_recursion(bit); 7668 } 7669 7670 /* 7671 * Some archs only support passing ip and parent_ip. Even though 7672 * the list function ignores the op parameter, we do not want any 7673 * C side effects, where a function is called without the caller 7674 * sending a third parameter. 7675 * Archs are to support both the regs and ftrace_ops at the same time. 7676 * If they support ftrace_ops, it is assumed they support regs. 7677 * If call backs want to use regs, they must either check for regs 7678 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. 
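 * A callback that wants the full registers would do, roughly:
 *
 *	struct pt_regs *regs = ftrace_get_regs(fregs);
 *
 *	if (!regs)
 *		return;	// the arch saved only a partial set
 *
 * (ftrace_get_regs() returns NULL unless a full pt_regs was saved.)
 *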
 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
 * An architecture can pass partial regs with ftrace_ops and still
 * set the ARCH_SUPPORTS_FTRACE_OPS.
 *
 * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be
 * arch_ftrace_ops_list_func.
 */
#if ARCH_SUPPORTS_FTRACE_OPS
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	__ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
}
#else
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
{
	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
}
#endif
NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);

/*
 * If there's only one function registered but it does not support
 * recursion or needs RCU protection, then this function will be called
 * by the mcount trampoline.
 */
static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	int bit;

	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
	if (bit < 0)
		return;

	if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
		op->func(ip, parent_ip, op, fregs);

	trace_clear_recursion(bit);
}
NOKPROBE_SYMBOL(ftrace_ops_assist_func);

/**
 * ftrace_ops_get_func - get the function a trampoline should call
 * @ops: the ops to get the function for
 *
 * Normally the mcount trampoline will call the ops->func, but there
 * are times that it should not. For example, if the ops does not
 * have its own recursion protection, then it should call the
 * ftrace_ops_assist_func() instead.
 *
 * Returns the function that the trampoline should call for @ops.
 */
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
{
	/*
	 * If the function does not handle recursion or needs to be RCU safe,
	 * then we need to call the assist handler.
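	 * (The assist handler wraps ops->func with the recursion guard and,
	 * for FTRACE_OPS_FL_RCU ops, an rcu_is_watching() check; see
	 * ftrace_ops_assist_func() above.)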
7737 */ 7738 if (ops->flags & (FTRACE_OPS_FL_RECURSION | 7739 FTRACE_OPS_FL_RCU)) 7740 return ftrace_ops_assist_func; 7741 7742 return ops->func; 7743 } 7744 7745 static void 7746 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt, 7747 struct task_struct *prev, 7748 struct task_struct *next, 7749 unsigned int prev_state) 7750 { 7751 struct trace_array *tr = data; 7752 struct trace_pid_list *pid_list; 7753 struct trace_pid_list *no_pid_list; 7754 7755 pid_list = rcu_dereference_sched(tr->function_pids); 7756 no_pid_list = rcu_dereference_sched(tr->function_no_pids); 7757 7758 if (trace_ignore_this_task(pid_list, no_pid_list, next)) 7759 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, 7760 FTRACE_PID_IGNORE); 7761 else 7762 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, 7763 next->pid); 7764 } 7765 7766 static void 7767 ftrace_pid_follow_sched_process_fork(void *data, 7768 struct task_struct *self, 7769 struct task_struct *task) 7770 { 7771 struct trace_pid_list *pid_list; 7772 struct trace_array *tr = data; 7773 7774 pid_list = rcu_dereference_sched(tr->function_pids); 7775 trace_filter_add_remove_task(pid_list, self, task); 7776 7777 pid_list = rcu_dereference_sched(tr->function_no_pids); 7778 trace_filter_add_remove_task(pid_list, self, task); 7779 } 7780 7781 static void 7782 ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task) 7783 { 7784 struct trace_pid_list *pid_list; 7785 struct trace_array *tr = data; 7786 7787 pid_list = rcu_dereference_sched(tr->function_pids); 7788 trace_filter_add_remove_task(pid_list, NULL, task); 7789 7790 pid_list = rcu_dereference_sched(tr->function_no_pids); 7791 trace_filter_add_remove_task(pid_list, NULL, task); 7792 } 7793 7794 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) 7795 { 7796 if (enable) { 7797 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, 7798 tr); 7799 register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit, 7800 tr); 7801 } else { 7802 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, 7803 tr); 7804 unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit, 7805 tr); 7806 } 7807 } 7808 7809 static void clear_ftrace_pids(struct trace_array *tr, int type) 7810 { 7811 struct trace_pid_list *pid_list; 7812 struct trace_pid_list *no_pid_list; 7813 int cpu; 7814 7815 pid_list = rcu_dereference_protected(tr->function_pids, 7816 lockdep_is_held(&ftrace_lock)); 7817 no_pid_list = rcu_dereference_protected(tr->function_no_pids, 7818 lockdep_is_held(&ftrace_lock)); 7819 7820 /* Make sure there's something to do */ 7821 if (!pid_type_enabled(type, pid_list, no_pid_list)) 7822 return; 7823 7824 /* See if the pids still need to be checked after this */ 7825 if (!still_need_pid_events(type, pid_list, no_pid_list)) { 7826 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); 7827 for_each_possible_cpu(cpu) 7828 per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE; 7829 } 7830 7831 if (type & TRACE_PIDS) 7832 rcu_assign_pointer(tr->function_pids, NULL); 7833 7834 if (type & TRACE_NO_PIDS) 7835 rcu_assign_pointer(tr->function_no_pids, NULL); 7836 7837 /* Wait till all users are no longer using pid filtering */ 7838 synchronize_rcu(); 7839 7840 if ((type & TRACE_PIDS) && pid_list) 7841 trace_pid_list_free(pid_list); 7842 7843 if ((type & TRACE_NO_PIDS) && no_pid_list) 7844 trace_pid_list_free(no_pid_list); 7845 } 7846 7847 void ftrace_clear_pids(struct trace_array 
*tr) 7848 { 7849 mutex_lock(&ftrace_lock); 7850 7851 clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS); 7852 7853 mutex_unlock(&ftrace_lock); 7854 } 7855 7856 static void ftrace_pid_reset(struct trace_array *tr, int type) 7857 { 7858 mutex_lock(&ftrace_lock); 7859 clear_ftrace_pids(tr, type); 7860 7861 ftrace_update_pid_func(); 7862 ftrace_startup_all(0); 7863 7864 mutex_unlock(&ftrace_lock); 7865 } 7866 7867 /* Greater than any max PID */ 7868 #define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1) 7869 7870 static void *fpid_start(struct seq_file *m, loff_t *pos) 7871 __acquires(RCU) 7872 { 7873 struct trace_pid_list *pid_list; 7874 struct trace_array *tr = m->private; 7875 7876 mutex_lock(&ftrace_lock); 7877 rcu_read_lock_sched(); 7878 7879 pid_list = rcu_dereference_sched(tr->function_pids); 7880 7881 if (!pid_list) 7882 return !(*pos) ? FTRACE_NO_PIDS : NULL; 7883 7884 return trace_pid_start(pid_list, pos); 7885 } 7886 7887 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) 7888 { 7889 struct trace_array *tr = m->private; 7890 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids); 7891 7892 if (v == FTRACE_NO_PIDS) { 7893 (*pos)++; 7894 return NULL; 7895 } 7896 return trace_pid_next(pid_list, v, pos); 7897 } 7898 7899 static void fpid_stop(struct seq_file *m, void *p) 7900 __releases(RCU) 7901 { 7902 rcu_read_unlock_sched(); 7903 mutex_unlock(&ftrace_lock); 7904 } 7905 7906 static int fpid_show(struct seq_file *m, void *v) 7907 { 7908 if (v == FTRACE_NO_PIDS) { 7909 seq_puts(m, "no pid\n"); 7910 return 0; 7911 } 7912 7913 return trace_pid_show(m, v); 7914 } 7915 7916 static const struct seq_operations ftrace_pid_sops = { 7917 .start = fpid_start, 7918 .next = fpid_next, 7919 .stop = fpid_stop, 7920 .show = fpid_show, 7921 }; 7922 7923 static void *fnpid_start(struct seq_file *m, loff_t *pos) 7924 __acquires(RCU) 7925 { 7926 struct trace_pid_list *pid_list; 7927 struct trace_array *tr = m->private; 7928 7929 mutex_lock(&ftrace_lock); 7930 rcu_read_lock_sched(); 7931 7932 pid_list = rcu_dereference_sched(tr->function_no_pids); 7933 7934 if (!pid_list) 7935 return !(*pos) ? 
FTRACE_NO_PIDS : NULL;

	return trace_pid_start(pid_list, pos);
}

static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);

	if (v == FTRACE_NO_PIDS) {
		(*pos)++;
		return NULL;
	}
	return trace_pid_next(pid_list, v, pos);
}

static const struct seq_operations ftrace_no_pid_sops = {
	.start = fnpid_start,
	.next = fnpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};

static int pid_open(struct inode *inode, struct file *file, int type)
{
	const struct seq_operations *seq_ops;
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret = 0;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_pid_reset(tr, type);

	switch (type) {
	case TRACE_PIDS:
		seq_ops = &ftrace_pid_sops;
		break;
	case TRACE_NO_PIDS:
		seq_ops = &ftrace_no_pid_sops;
		break;
	default:
		trace_array_put(tr);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	ret = seq_open(file, seq_ops);
	if (ret < 0) {
		trace_array_put(tr);
	} else {
		m = file->private_data;
		/* copy tr over to seq ops */
		m->private = tr;
	}

	return ret;
}

static int
ftrace_pid_open(struct inode *inode, struct file *file)
{
	return pid_open(inode, file, TRACE_PIDS);
}

static int
ftrace_no_pid_open(struct inode *inode, struct file *file)
{
	return pid_open(inode, file, TRACE_NO_PIDS);
}

static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * ftrace_lock is held.
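	 * (That is the lock the rcu_dereference_protected() calls below
	 * check via mutex_is_locked().)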
8020 */ 8021 pid_list = rcu_dereference_protected(tr->function_pids, 8022 mutex_is_locked(&ftrace_lock)); 8023 no_pid_list = rcu_dereference_protected(tr->function_no_pids, 8024 mutex_is_locked(&ftrace_lock)); 8025 8026 if (trace_ignore_this_task(pid_list, no_pid_list, current)) 8027 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, 8028 FTRACE_PID_IGNORE); 8029 else 8030 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, 8031 current->pid); 8032 } 8033 8034 static ssize_t 8035 pid_write(struct file *filp, const char __user *ubuf, 8036 size_t cnt, loff_t *ppos, int type) 8037 { 8038 struct seq_file *m = filp->private_data; 8039 struct trace_array *tr = m->private; 8040 struct trace_pid_list *filtered_pids; 8041 struct trace_pid_list *other_pids; 8042 struct trace_pid_list *pid_list; 8043 ssize_t ret; 8044 8045 if (!cnt) 8046 return 0; 8047 8048 mutex_lock(&ftrace_lock); 8049 8050 switch (type) { 8051 case TRACE_PIDS: 8052 filtered_pids = rcu_dereference_protected(tr->function_pids, 8053 lockdep_is_held(&ftrace_lock)); 8054 other_pids = rcu_dereference_protected(tr->function_no_pids, 8055 lockdep_is_held(&ftrace_lock)); 8056 break; 8057 case TRACE_NO_PIDS: 8058 filtered_pids = rcu_dereference_protected(tr->function_no_pids, 8059 lockdep_is_held(&ftrace_lock)); 8060 other_pids = rcu_dereference_protected(tr->function_pids, 8061 lockdep_is_held(&ftrace_lock)); 8062 break; 8063 default: 8064 ret = -EINVAL; 8065 WARN_ON_ONCE(1); 8066 goto out; 8067 } 8068 8069 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt); 8070 if (ret < 0) 8071 goto out; 8072 8073 switch (type) { 8074 case TRACE_PIDS: 8075 rcu_assign_pointer(tr->function_pids, pid_list); 8076 break; 8077 case TRACE_NO_PIDS: 8078 rcu_assign_pointer(tr->function_no_pids, pid_list); 8079 break; 8080 } 8081 8082 8083 if (filtered_pids) { 8084 synchronize_rcu(); 8085 trace_pid_list_free(filtered_pids); 8086 } else if (pid_list && !other_pids) { 8087 /* Register a probe to set whether to ignore the tracing of a task */ 8088 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); 8089 } 8090 8091 /* 8092 * Ignoring of pids is done at task switch. But we have to 8093 * check for those tasks that are currently running. 8094 * Always do this in case a pid was appended or removed. 
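	 * (on_each_cpu() runs ignore_task_cpu() on every CPU with interrupts
	 * disabled and waits for completion, so each CPU re-evaluates the
	 * task it is currently running.)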
 */
	on_each_cpu(ignore_task_cpu, tr, 1);

	ftrace_update_pid_func();
	ftrace_startup_all(0);
 out:
	mutex_unlock(&ftrace_lock);

	if (ret > 0)
		*ppos += ret;

	return ret;
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
}

static ssize_t
ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
}

static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return seq_release(inode, file);
}

static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
	.llseek		= tracing_lseek,
	.release	= ftrace_pid_release,
};

static const struct file_operations ftrace_no_pid_fops = {
	.open		= ftrace_no_pid_open,
	.write		= ftrace_no_pid_write,
	.read		= seq_read,
	.llseek		= tracing_lseek,
	.release	= ftrace_pid_release,
};

void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
			    tr, &ftrace_pid_fops);
	trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
			    d_tracer, tr, &ftrace_no_pid_fops);
}

void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
					 struct dentry *d_tracer)
{
	/* Only the top level directory has the dyn_tracefs and profile */
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));

	ftrace_init_dyn_tracefs(d_tracer);
	ftrace_profile_tracefs(d_tracer);
}

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: nothing is unregistered and nothing is
 * synchronized, the callbacks are simply cut off. Since it does no
 * more than set a few flags, it is safe to call from atomic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	ftrace_trace_function = ftrace_stub;
}

/**
 * ftrace_is_dead - Test if ftrace is dead or not.
 *
 * Returns 1 if ftrace is "dead", zero otherwise.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * When registering ftrace_ops with IPMODIFY, it is necessary to make sure
 * it doesn't conflict with any direct ftrace_ops. If there is existing
 * direct ftrace_ops on a kernel function being patched, call
 * FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER on it to enable sharing.
 *
 * @ops: ftrace_ops being registered.
 *
 * Returns:
 *         0 on success;
 *         Negative on failure.
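 *
 * direct_mutex must be held by the caller (asserted via lockdep below).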
8203 */ 8204 static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops) 8205 { 8206 struct ftrace_func_entry *entry; 8207 struct ftrace_hash *hash; 8208 struct ftrace_ops *op; 8209 int size, i, ret; 8210 8211 lockdep_assert_held_once(&direct_mutex); 8212 8213 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) 8214 return 0; 8215 8216 hash = ops->func_hash->filter_hash; 8217 size = 1 << hash->size_bits; 8218 for (i = 0; i < size; i++) { 8219 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 8220 unsigned long ip = entry->ip; 8221 bool found_op = false; 8222 8223 mutex_lock(&ftrace_lock); 8224 do_for_each_ftrace_op(op, ftrace_ops_list) { 8225 if (!(op->flags & FTRACE_OPS_FL_DIRECT)) 8226 continue; 8227 if (ops_references_ip(op, ip)) { 8228 found_op = true; 8229 break; 8230 } 8231 } while_for_each_ftrace_op(op); 8232 mutex_unlock(&ftrace_lock); 8233 8234 if (found_op) { 8235 if (!op->ops_func) 8236 return -EBUSY; 8237 8238 ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER); 8239 if (ret) 8240 return ret; 8241 } 8242 } 8243 } 8244 8245 return 0; 8246 } 8247 8248 /* 8249 * Similar to prepare_direct_functions_for_ipmodify, clean up after ops 8250 * with IPMODIFY is unregistered. The cleanup is optional for most DIRECT 8251 * ops. 8252 */ 8253 static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops) 8254 { 8255 struct ftrace_func_entry *entry; 8256 struct ftrace_hash *hash; 8257 struct ftrace_ops *op; 8258 int size, i; 8259 8260 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) 8261 return; 8262 8263 mutex_lock(&direct_mutex); 8264 8265 hash = ops->func_hash->filter_hash; 8266 size = 1 << hash->size_bits; 8267 for (i = 0; i < size; i++) { 8268 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 8269 unsigned long ip = entry->ip; 8270 bool found_op = false; 8271 8272 mutex_lock(&ftrace_lock); 8273 do_for_each_ftrace_op(op, ftrace_ops_list) { 8274 if (!(op->flags & FTRACE_OPS_FL_DIRECT)) 8275 continue; 8276 if (ops_references_ip(op, ip)) { 8277 found_op = true; 8278 break; 8279 } 8280 } while_for_each_ftrace_op(op); 8281 mutex_unlock(&ftrace_lock); 8282 8283 /* The cleanup is optional, ignore any errors */ 8284 if (found_op && op->ops_func) 8285 op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER); 8286 } 8287 } 8288 mutex_unlock(&direct_mutex); 8289 } 8290 8291 #define lock_direct_mutex() mutex_lock(&direct_mutex) 8292 #define unlock_direct_mutex() mutex_unlock(&direct_mutex) 8293 8294 #else /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ 8295 8296 static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops) 8297 { 8298 return 0; 8299 } 8300 8301 static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops) 8302 { 8303 } 8304 8305 #define lock_direct_mutex() do { } while (0) 8306 #define unlock_direct_mutex() do { } while (0) 8307 8308 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ 8309 8310 /* 8311 * Similar to register_ftrace_function, except we don't lock direct_mutex. 8312 */ 8313 static int register_ftrace_function_nolock(struct ftrace_ops *ops) 8314 { 8315 int ret; 8316 8317 ftrace_ops_init(ops); 8318 8319 mutex_lock(&ftrace_lock); 8320 8321 ret = ftrace_startup(ops, 0); 8322 8323 mutex_unlock(&ftrace_lock); 8324 8325 return ret; 8326 } 8327 8328 /** 8329 * register_ftrace_function - register a function for profiling 8330 * @ops: ops structure that holds the function for profiling. 8331 * 8332 * Register a function to be called by all functions in the 8333 * kernel. 
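 *
 * A minimal (hypothetical) user looks roughly like:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// called on every traced function entry
 *	}
 *	static struct ftrace_ops my_ops = { .func = my_callback };
 *
 *	register_ftrace_function(&my_ops);
 *
 * where my_callback and my_ops are made-up names for illustration.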
8334 * 8335 * Note: @ops->func and all the functions it calls must be labeled 8336 * with "notrace", otherwise it will go into a 8337 * recursive loop. 8338 */ 8339 int register_ftrace_function(struct ftrace_ops *ops) 8340 { 8341 int ret; 8342 8343 lock_direct_mutex(); 8344 ret = prepare_direct_functions_for_ipmodify(ops); 8345 if (ret < 0) 8346 goto out_unlock; 8347 8348 ret = register_ftrace_function_nolock(ops); 8349 8350 out_unlock: 8351 unlock_direct_mutex(); 8352 return ret; 8353 } 8354 EXPORT_SYMBOL_GPL(register_ftrace_function); 8355 8356 /** 8357 * unregister_ftrace_function - unregister a function for profiling. 8358 * @ops: ops structure that holds the function to unregister 8359 * 8360 * Unregister a function that was added to be called by ftrace profiling. 8361 */ 8362 int unregister_ftrace_function(struct ftrace_ops *ops) 8363 { 8364 int ret; 8365 8366 mutex_lock(&ftrace_lock); 8367 ret = ftrace_shutdown(ops, 0); 8368 mutex_unlock(&ftrace_lock); 8369 8370 cleanup_direct_functions_after_ipmodify(ops); 8371 return ret; 8372 } 8373 EXPORT_SYMBOL_GPL(unregister_ftrace_function); 8374 8375 static int symbols_cmp(const void *a, const void *b) 8376 { 8377 const char **str_a = (const char **) a; 8378 const char **str_b = (const char **) b; 8379 8380 return strcmp(*str_a, *str_b); 8381 } 8382 8383 struct kallsyms_data { 8384 unsigned long *addrs; 8385 const char **syms; 8386 size_t cnt; 8387 size_t found; 8388 }; 8389 8390 /* This function gets called for all kernel and module symbols 8391 * and returns 1 in case we resolved all the requested symbols, 8392 * 0 otherwise. 8393 */ 8394 static int kallsyms_callback(void *data, const char *name, unsigned long addr) 8395 { 8396 struct kallsyms_data *args = data; 8397 const char **sym; 8398 int idx; 8399 8400 sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp); 8401 if (!sym) 8402 return 0; 8403 8404 idx = sym - args->syms; 8405 if (args->addrs[idx]) 8406 return 0; 8407 8408 if (!ftrace_location(addr)) 8409 return 0; 8410 8411 args->addrs[idx] = addr; 8412 args->found++; 8413 return args->found == args->cnt ? 1 : 0; 8414 } 8415 8416 /** 8417 * ftrace_lookup_symbols - Lookup addresses for array of symbols 8418 * 8419 * @sorted_syms: array of symbols pointers symbols to resolve, 8420 * must be alphabetically sorted 8421 * @cnt: number of symbols/addresses in @syms/@addrs arrays 8422 * @addrs: array for storing resulting addresses 8423 * 8424 * This function looks up addresses for array of symbols provided in 8425 * @syms array (must be alphabetically sorted) and stores them in 8426 * @addrs array, which needs to be big enough to store at least @cnt 8427 * addresses. 8428 * 8429 * This function returns 0 if all provided symbols are found, 8430 * -ESRCH otherwise. 8431 */ 8432 int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs) 8433 { 8434 struct kallsyms_data args; 8435 int found_all; 8436 8437 memset(addrs, 0, sizeof(*addrs) * cnt); 8438 args.addrs = addrs; 8439 args.syms = sorted_syms; 8440 args.cnt = cnt; 8441 args.found = 0; 8442 8443 found_all = kallsyms_on_each_symbol(kallsyms_callback, &args); 8444 if (found_all) 8445 return 0; 8446 found_all = module_kallsyms_on_each_symbol(NULL, kallsyms_callback, &args); 8447 return found_all ? 
0 : -ESRCH; 8448 } 8449 8450 #ifdef CONFIG_SYSCTL 8451 8452 #ifdef CONFIG_DYNAMIC_FTRACE 8453 static void ftrace_startup_sysctl(void) 8454 { 8455 int command; 8456 8457 if (unlikely(ftrace_disabled)) 8458 return; 8459 8460 /* Force update next time */ 8461 saved_ftrace_func = NULL; 8462 /* ftrace_start_up is true if we want ftrace running */ 8463 if (ftrace_start_up) { 8464 command = FTRACE_UPDATE_CALLS; 8465 if (ftrace_graph_active) 8466 command |= FTRACE_START_FUNC_RET; 8467 ftrace_startup_enable(command); 8468 } 8469 } 8470 8471 static void ftrace_shutdown_sysctl(void) 8472 { 8473 int command; 8474 8475 if (unlikely(ftrace_disabled)) 8476 return; 8477 8478 /* ftrace_start_up is true if ftrace is running */ 8479 if (ftrace_start_up) { 8480 command = FTRACE_DISABLE_CALLS; 8481 if (ftrace_graph_active) 8482 command |= FTRACE_STOP_FUNC_RET; 8483 ftrace_run_update_code(command); 8484 } 8485 } 8486 #else 8487 # define ftrace_startup_sysctl() do { } while (0) 8488 # define ftrace_shutdown_sysctl() do { } while (0) 8489 #endif /* CONFIG_DYNAMIC_FTRACE */ 8490 8491 static bool is_permanent_ops_registered(void) 8492 { 8493 struct ftrace_ops *op; 8494 8495 do_for_each_ftrace_op(op, ftrace_ops_list) { 8496 if (op->flags & FTRACE_OPS_FL_PERMANENT) 8497 return true; 8498 } while_for_each_ftrace_op(op); 8499 8500 return false; 8501 } 8502 8503 static int 8504 ftrace_enable_sysctl(struct ctl_table *table, int write, 8505 void *buffer, size_t *lenp, loff_t *ppos) 8506 { 8507 int ret = -ENODEV; 8508 8509 mutex_lock(&ftrace_lock); 8510 8511 if (unlikely(ftrace_disabled)) 8512 goto out; 8513 8514 ret = proc_dointvec(table, write, buffer, lenp, ppos); 8515 8516 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) 8517 goto out; 8518 8519 if (ftrace_enabled) { 8520 8521 /* we are starting ftrace again */ 8522 if (rcu_dereference_protected(ftrace_ops_list, 8523 lockdep_is_held(&ftrace_lock)) != &ftrace_list_end) 8524 update_ftrace_function(); 8525 8526 ftrace_startup_sysctl(); 8527 8528 } else { 8529 if (is_permanent_ops_registered()) { 8530 ftrace_enabled = true; 8531 ret = -EBUSY; 8532 goto out; 8533 } 8534 8535 /* stopping ftrace calls (just send to ftrace_stub) */ 8536 ftrace_trace_function = ftrace_stub; 8537 8538 ftrace_shutdown_sysctl(); 8539 } 8540 8541 last_ftrace_enabled = !!ftrace_enabled; 8542 out: 8543 mutex_unlock(&ftrace_lock); 8544 return ret; 8545 } 8546 8547 static struct ctl_table ftrace_sysctls[] = { 8548 { 8549 .procname = "ftrace_enabled", 8550 .data = &ftrace_enabled, 8551 .maxlen = sizeof(int), 8552 .mode = 0644, 8553 .proc_handler = ftrace_enable_sysctl, 8554 }, 8555 {} 8556 }; 8557 8558 static int __init ftrace_sysctl_init(void) 8559 { 8560 register_sysctl_init("kernel", ftrace_sysctls); 8561 return 0; 8562 } 8563 late_initcall(ftrace_sysctl_init); 8564 #endif 8565