// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "ftrace_internal.h"
#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_INVALID_FUNCTION		"__ftrace_invalid_address__"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})
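/*
 * Both macros above evaluate to the value of cond, so they can be used
 * directly in a test; when cond is true they additionally WARN (once,
 * for the _ONCE variant) and shut the tracer down via ftrace_kill().
 */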
/* hash bits for specific function selection */
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#endif

enum {
	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
};

struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int __maybe_unused last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL || tr->function_no_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

DEFINE_MUTEX(ftrace_lock);

struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;

/* Defined by vmlinux.lds.h; see the comment above arch_ftrace_ops_list_func for details */
void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		if (pid == FTRACE_PID_IGNORE)
			return;
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return;
	}

	op->saved_func(ip, parent_ip, op, fregs);
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic or RCU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
	    FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

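	/*
	 * For the !CONFIG_DYNAMIC_FTRACE path below, the switch is a three
	 * step dance: first point every CPU at the list function (which
	 * does not read function_trace_op), then publish the new
	 * function_trace_op and force ordering on all CPUs (the smp_wmb()
	 * paired with the rmb done in the IPI), and only then install the
	 * final ftrace_trace_function.
	 */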
#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	synchronize_rcu_tasks_rude();
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it can only be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
		return -EBUSY;

	if (!is_kernel_core_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
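/*
 * Rough capacity (assuming 4 KiB pages on a 64-bit kernel): a record is
 * 48 bytes with CONFIG_FUNCTION_GRAPH_TRACER (32 bytes without), and the
 * page header takes 16 bytes, so one page holds about 85 (or 127) records.
 */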
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* without function graph, compare against hits */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = div64_ul(rec->time, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only by 1000 for the ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide by 1000 again.
		 */
		stddev = div64_ul(stddev,
				  rec->counter * (rec->counter - 1) * 1000);
	}
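	/*
	 * E.g. for two samples x = {1, 3}: n * \Sum (x_i)^2 - (\Sum x_i)^2
	 * = 2 * 10 - 4^2 = 4, and dividing by n * (n - 1) = 2 gives 2,
	 * which is indeed the sample variance of {1, 3}.
	 */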
	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static bool fgraph_graph_time = true;

void ftrace_graph_graph_time_control(bool enable)
{
	fgraph_graph_time = enable;
}

static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	struct ftrace_ret_stack *ret_stack;

	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	ret_stack = ftrace_graph_get_ret_stack(current, 0);
	if (ret_stack)
		ret_stack->subtime = 0;

	return 1;
}
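/*
 * On return, calltime below is the function's total wall time. When
 * fgraph_graph_time is off, the time of each child (accumulated into
 * the parent's subtime) is subtracted so that only the function's own
 * time is charged to its record.
 */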
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_ret_stack *ret_stack;
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {

		/* Append this call time to the parent time to subtract */
		ret_stack = ftrace_graph_get_ret_stack(current, 1);
		if (ret_stack)
			ret_stack->subtime += calltime;

		ret_stack = ftrace_graph_get_ret_stack(current, 0);
		if (ret_stack && ret_stack->subtime < calltime)
			calltime -= ret_stack->subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static struct fgraph_ops fprofiler_ops = {
	.entryfunc = &profile_graph_entry,
	.retfunc = &profile_graph_return,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&fprofiler_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph(&fprofiler_ops);
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_rcu.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};
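/*
 * Userspace interface (sketch; assumes tracefs mounted at
 * /sys/kernel/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	cat /sys/kernel/tracing/trace_stat/function0
 *
 * One "function<N>" stat file per possible CPU is registered below.
 */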
/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent; if something
			 * happens we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	trace_create_file("function_profile_enabled",
			  TRACE_MODE_WRITE, d_tracer, NULL,
			  &ftrace_profile_fops);
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated;
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			order;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns the entry that holds the @ip if found. NULL otherwise.
 */
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	if (ftrace_hash_empty(hash))
		return NULL;

	return __ftrace_lookup_ip(hash, ip);
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	key = ftrace_hash_key(hash, entry->ip);
	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
	list_del(&ftrace_mod->list);
	kfree(ftrace_mod->module);
	kfree(ftrace_mod->func);
	kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
	struct ftrace_mod_load *p, *n;

	/* stack tracer isn't supported yet */
	if (!head)
		return;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(p, n, head, list)
		free_ftrace_mod(p);
	mutex_unlock(&ftrace_lock);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

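/*
 * Note on lifetime: filter/notrace hashes are never resized in place.
 * A new hash is allocated, entries are copied or moved over, the ops
 * pointer is switched with rcu_assign_pointer(), and the old hash is
 * freed via free_ftrace_hash_rcu() so lockless readers stay safe.
 */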
static int ftrace_add_mod(struct trace_array *tr,
			  const char *func, const char *module,
			  int enable)
{
	struct ftrace_mod_load *ftrace_mod;
	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
	if (!ftrace_mod)
		return -ENOMEM;

	INIT_LIST_HEAD(&ftrace_mod->list);
	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
	ftrace_mod->enable = enable;

	if (!ftrace_mod->func || !ftrace_mod->module)
		goto out_free;

	list_add(&ftrace_mod->list, mod_head);

	return 0;

 out_free:
	free_ftrace_mod(ftrace_mod);

	return -ENOMEM;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	if (hash)
		new_hash->flags = hash->flags;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_head *hhd;
	struct hlist_node *tn;
	int bits = 0;
	int i;

	/*
	 * Use around half the size (max bit of it), but
	 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
	 */
	bits = fls(size / 2);

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}
	return new_hash;
}
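/*
 * E.g. a source hash with 1000 entries gives bits = fls(500) = 9,
 * i.e. 512 buckets and roughly two entries per bucket, capped at
 * 1 << FTRACE_HASH_MAX_BITS (4096) buckets.
 */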
static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	int size = src->count;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	return dup_hash(src, size);
}

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
}

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu().
 */
int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
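/*
 * Typical usage (sketch; "interesting" is a stand-in predicate):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (!interesting(rec))
 *			continue;
 *		...
 *	} while_for_each_ftrace_rec();
 *
 * To leave the walk early, use a goto, never a break.
 */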

static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec = NULL;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			break;
	}
	return rec;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;

	rec = lookup_rec(start, end);
	if (rec)
		return rec->ip;

	return 0;
}

/**
 * ftrace_location - return the ftrace location
 * @ip: the instruction pointer to check
 *
 * If @ip matches the ftrace location, return @ip.
 * If @ip matches sym+0, return sym's ftrace location.
 * Otherwise, return 0.
 */
unsigned long ftrace_location(unsigned long ip)
{
	struct dyn_ftrace *rec;
	unsigned long offset;
	unsigned long size;

	rec = lookup_rec(ip, ip);
	if (!rec) {
		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
			goto out;

		/* map sym+0 to __fentry__ */
		if (!offset)
			rec = lookup_rec(ip, ip + size - 1);
	}

	if (rec)
		return rec->ip;

out:
	return 0;
}
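/*
 * Note the sym+0 mapping above: looking up a symbol's start address
 * finds the __fentry__ call site wherever it sits within the function,
 * since kallsyms supplies the symbol size for the range search.
 */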
/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if the range between @start and @end contains an ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

static bool skip_record(struct dyn_ftrace *rec)
{
	/*
	 * At boot up, weak functions are set to disable. Function tracing
	 * can be enabled before they are, and they still need to be disabled now.
	 * If the record is disabled, still continue if it is marked as already
	 * enabled (this is needed to keep the accounting working).
	 */
	return rec->flags & FTRACE_FL_DISABLED &&
		!(rec->flags & FTRACE_FL_ENABLED);
}

static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = true;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (skip_record(rec))
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags |= FTRACE_FL_DIRECT;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * Only the internal direct_ops should have the
			 * DIRECT flag set. Thus, if it is removing a
			 * function, then that function should no longer
			 * be direct.
			 */
			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags &= ~FTRACE_FL_DIRECT;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * The TRAMP needs to be set only if rec count
			 * is decremented to one, and the ops that is
			 * left has a trampoline, as TRAMP can only be
			 * enabled when there is a single ops attached
			 * to it.
			 */
			if (ftrace_rec_count(rec) == 1 &&
			    ftrace_find_tramp_ops_any_other(rec, ops))
				rec->flags |= FTRACE_FL_TRAMP;
			else
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}

static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}
/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no update is needed, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the following meanings:
 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 * - If the hash is EMPTY_HASH, it hits nothing
 * - Anything else hits the recs which match the hash entries.
 *
 * DIRECT ops do not have the IPMODIFY flag, but we still need to check them
 * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call
 * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with
 * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate
 * the return value to the caller and eventually to the owner of the DIRECT
 * ops.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;
	bool is_ipmodify, is_direct;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY;
	is_direct = ops->flags & FTRACE_OPS_FL_DIRECT;

	/* neither IPMODIFY nor DIRECT, skip */
	if (!is_ipmodify && !is_direct)
		return 0;

	if (WARN_ON_ONCE(is_ipmodify && is_direct))
		return 0;

	/*
	 * Since the IPMODIFY and DIRECT are very address sensitive
	 * actions, we do not allow ftrace_ops to set all functions to new
	 * hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			if (rec->flags & FTRACE_FL_IPMODIFY) {
				int ret;

				/* Cannot have two ipmodify on same rec */
				if (is_ipmodify)
					goto rollback;

				FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);

				/*
				 * Another ops with IPMODIFY is already
				 * attached. We are now attaching a direct
				 * ops. Run SHARE_IPMODIFY_SELF, to check
				 * whether sharing is supported.
				 */
				if (!ops->ops_func)
					return -EBUSY;
				ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF);
				if (ret)
					return ret;
			} else if (is_ipmodify) {
				rec->flags |= FTRACE_FL_IPMODIFY;
			}
		} else if (is_ipmodify) {
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		}
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}

static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}

static void print_ip_ins(const char *fmt, const unsigned char *p)
{
	char ins[MCOUNT_INSN_SIZE];

	if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
		printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
		return;
	}

	printk(KERN_CONT "%s", fmt);
	pr_cont("%*phC", MCOUNT_INSN_SIZE, ins);
}

enum ftrace_bug_type ftrace_bug_type;
const void *ftrace_expected;

static void print_bug_type(void)
{
	switch (ftrace_bug_type) {
	case FTRACE_BUG_UNKNOWN:
		break;
	case FTRACE_BUG_INIT:
		pr_info("Initializing ftrace call sites\n");
		break;
	case FTRACE_BUG_NOP:
		pr_info("Setting ftrace call site to NOP\n");
		break;
	case FTRACE_BUG_CALL:
		pr_info("Setting ftrace call site to call ftrace function\n");
		break;
	case FTRACE_BUG_UPDATE:
		pr_info("Updating ftrace call site to call a different ftrace function\n");
		break;
	}
}
/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @rec: The record that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
	unsigned long ip = rec ? rec->ip : 0;

	pr_info("------------[ ftrace bug ]------------\n");

	switch (failed) {
	case -EFAULT:
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(KERN_INFO, ip);
		break;
	case -EINVAL:
		pr_info("ftrace failed to modify ");
		print_ip_sym(KERN_INFO, ip);
		print_ip_ins(" actual:   ", (unsigned char *)ip);
		pr_cont("\n");
		if (ftrace_expected) {
			print_ip_ins(" expected: ", ftrace_expected);
			pr_cont("\n");
		}
		break;
	case -EPERM:
		pr_info("ftrace faulted on writing ");
		print_ip_sym(KERN_INFO, ip);
		break;
	default:
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(KERN_INFO, ip);
	}
	print_bug_type();
	if (rec) {
		struct ftrace_ops *ops = NULL;

		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops) {
				do {
					pr_cont("\ttramp: %pS (%pS)",
						(void *)ops->trampoline,
						(void *)ops->func);
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
				pr_cont("\ttramp: ERROR!");

		}
		ip = ftrace_get_addr_curr(rec);
		pr_cont("\n expected tramp: %lx\n", ip);
	}

	FTRACE_WARN_ON_ONCE(1);
}

static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
{
	unsigned long flag = 0UL;

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	if (skip_record(rec))
		return FTRACE_UPDATE_IGNORE;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure its disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && ftrace_rec_count(rec))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
	 * Same for direct calls.
	 */
	if (flag) {
		if (!(rec->flags & FTRACE_FL_REGS) !=
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) !=
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;

		/*
		 * Direct calls are special, as count matters.
		 * We must test the record for direct, if the
		 * DIRECT and DIRECT_EN do not match, but only
		 * if the count is 1. That's because, if the
		 * count is something other than one, we do not
		 * want the direct enabled (it will be done via the
		 * direct helper). But if DIRECT_EN is set, and
		 * the count is not one, we need to clear it.
		 */
That's because, if the 2176 * count is something other than one, we do not 2177 * want the direct enabled (it will be done via the 2178 * direct helper). But if DIRECT_EN is set, and 2179 * the count is not one, we need to clear it. 2180 */ 2181 if (ftrace_rec_count(rec) == 1) { 2182 if (!(rec->flags & FTRACE_FL_DIRECT) != 2183 !(rec->flags & FTRACE_FL_DIRECT_EN)) 2184 flag |= FTRACE_FL_DIRECT; 2185 } else if (rec->flags & FTRACE_FL_DIRECT_EN) { 2186 flag |= FTRACE_FL_DIRECT; 2187 } 2188 } 2189 2190 /* If the state of this record hasn't changed, then do nothing */ 2191 if ((rec->flags & FTRACE_FL_ENABLED) == flag) 2192 return FTRACE_UPDATE_IGNORE; 2193 2194 if (flag) { 2195 /* Save off if rec is being enabled (for return value) */ 2196 flag ^= rec->flags & FTRACE_FL_ENABLED; 2197 2198 if (update) { 2199 rec->flags |= FTRACE_FL_ENABLED; 2200 if (flag & FTRACE_FL_REGS) { 2201 if (rec->flags & FTRACE_FL_REGS) 2202 rec->flags |= FTRACE_FL_REGS_EN; 2203 else 2204 rec->flags &= ~FTRACE_FL_REGS_EN; 2205 } 2206 if (flag & FTRACE_FL_TRAMP) { 2207 if (rec->flags & FTRACE_FL_TRAMP) 2208 rec->flags |= FTRACE_FL_TRAMP_EN; 2209 else 2210 rec->flags &= ~FTRACE_FL_TRAMP_EN; 2211 } 2212 2213 if (flag & FTRACE_FL_DIRECT) { 2214 /* 2215 * If there's only one user (direct_ops helper) 2216 * then we can call the direct function 2217 * directly (no ftrace trampoline). 2218 */ 2219 if (ftrace_rec_count(rec) == 1) { 2220 if (rec->flags & FTRACE_FL_DIRECT) 2221 rec->flags |= FTRACE_FL_DIRECT_EN; 2222 else 2223 rec->flags &= ~FTRACE_FL_DIRECT_EN; 2224 } else { 2225 /* 2226 * Can only call directly if there's 2227 * only one callback to the function. 2228 */ 2229 rec->flags &= ~FTRACE_FL_DIRECT_EN; 2230 } 2231 } 2232 } 2233 2234 /* 2235 * If this record is being updated from a nop, then 2236 * return UPDATE_MAKE_CALL. 2237 * Otherwise, 2238 * return UPDATE_MODIFY_CALL to tell the caller to convert 2239 * from the save regs, to a non-save regs function or 2240 * vice versa, or from a trampoline call. 2241 */ 2242 if (flag & FTRACE_FL_ENABLED) { 2243 ftrace_bug_type = FTRACE_BUG_CALL; 2244 return FTRACE_UPDATE_MAKE_CALL; 2245 } 2246 2247 ftrace_bug_type = FTRACE_BUG_UPDATE; 2248 return FTRACE_UPDATE_MODIFY_CALL; 2249 } 2250 2251 if (update) { 2252 /* If there's no more users, clear all flags */ 2253 if (!ftrace_rec_count(rec)) 2254 rec->flags &= FTRACE_FL_DISABLED; 2255 else 2256 /* 2257 * Just disable the record, but keep the ops TRAMP 2258 * and REGS states. The _EN flags must be disabled though. 2259 */ 2260 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | 2261 FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN); 2262 } 2263 2264 ftrace_bug_type = FTRACE_BUG_NOP; 2265 return FTRACE_UPDATE_MAKE_NOP; 2266 } 2267 2268 /** 2269 * ftrace_update_record - set a record that now is tracing or not 2270 * @rec: the record to update 2271 * @enable: set to true if the record is tracing, false to force disable 2272 * 2273 * The records that represent all functions that can be traced need 2274 * to be updated when tracing has been enabled. 2275 */ 2276 int ftrace_update_record(struct dyn_ftrace *rec, bool enable) 2277 { 2278 return ftrace_check_record(rec, enable, true); 2279 } 2280 2281 /** 2282 * ftrace_test_record - check if the record has been enabled or not 2283 * @rec: the record to test 2284 * @enable: set to true to check if enabled, false if it is disabled 2285 * 2286 * The arch code may need to test if a record is already set to 2287 * tracing to determine how to modify the function code that it 2288 * represents. 
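 *
 * A rough usage sketch (illustrative only; make_call() is a made-up
 * arch helper, not a real function):
 *
 *	if (ftrace_test_record(rec, true) == FTRACE_UPDATE_MAKE_CALL)
 *		ret = make_call(rec, ftrace_get_addr_new(rec));
 *
 * Unlike ftrace_update_record(), this leaves rec->flags untouched.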
2289 */ 2290 int ftrace_test_record(struct dyn_ftrace *rec, bool enable) 2291 { 2292 return ftrace_check_record(rec, enable, false); 2293 } 2294 2295 static struct ftrace_ops * 2296 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec) 2297 { 2298 struct ftrace_ops *op; 2299 unsigned long ip = rec->ip; 2300 2301 do_for_each_ftrace_op(op, ftrace_ops_list) { 2302 2303 if (!op->trampoline) 2304 continue; 2305 2306 if (hash_contains_ip(ip, op->func_hash)) 2307 return op; 2308 } while_for_each_ftrace_op(op); 2309 2310 return NULL; 2311 } 2312 2313 static struct ftrace_ops * 2314 ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude) 2315 { 2316 struct ftrace_ops *op; 2317 unsigned long ip = rec->ip; 2318 2319 do_for_each_ftrace_op(op, ftrace_ops_list) { 2320 2321 if (op == op_exclude || !op->trampoline) 2322 continue; 2323 2324 if (hash_contains_ip(ip, op->func_hash)) 2325 return op; 2326 } while_for_each_ftrace_op(op); 2327 2328 return NULL; 2329 } 2330 2331 static struct ftrace_ops * 2332 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, 2333 struct ftrace_ops *op) 2334 { 2335 unsigned long ip = rec->ip; 2336 2337 while_for_each_ftrace_op(op) { 2338 2339 if (!op->trampoline) 2340 continue; 2341 2342 if (hash_contains_ip(ip, op->func_hash)) 2343 return op; 2344 } 2345 2346 return NULL; 2347 } 2348 2349 static struct ftrace_ops * 2350 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) 2351 { 2352 struct ftrace_ops *op; 2353 unsigned long ip = rec->ip; 2354 2355 /* 2356 * Need to check removed ops first. 2357 * If they are being removed, and this rec has a tramp, 2358 * and this rec is in the ops list, then it would be the 2359 * one with the tramp. 2360 */ 2361 if (removed_ops) { 2362 if (hash_contains_ip(ip, &removed_ops->old_hash)) 2363 return removed_ops; 2364 } 2365 2366 /* 2367 * Need to find the current trampoline for a rec. 2368 * Now, a trampoline is only attached to a rec if there 2369 * was a single 'ops' attached to it. But this can be called 2370 * when we are adding another op to the rec or removing the 2371 * current one. Thus, if the op is being added, we can 2372 * ignore it because it hasn't attached itself to the rec 2373 * yet. 2374 * 2375 * If an ops is being modified (hooking to different functions) 2376 * then we don't care about the new functions that are being 2377 * added, just the old ones (that are probably being removed). 2378 * 2379 * If we are adding an ops to a function that already is using 2380 * a trampoline, it needs to be removed (trampolines are only 2381 * for single ops connected), then an ops that is not being 2382 * modified also needs to be checked. 2383 */ 2384 do_for_each_ftrace_op(op, ftrace_ops_list) { 2385 2386 if (!op->trampoline) 2387 continue; 2388 2389 /* 2390 * If the ops is being added, it hasn't gotten to 2391 * the point to be removed from this tree yet. 2392 */ 2393 if (op->flags & FTRACE_OPS_FL_ADDING) 2394 continue; 2395 2396 2397 /* 2398 * If the ops is being modified and is in the old 2399 * hash, then it is probably being removed from this 2400 * function. 2401 */ 2402 if ((op->flags & FTRACE_OPS_FL_MODIFYING) && 2403 hash_contains_ip(ip, &op->old_hash)) 2404 return op; 2405 /* 2406 * If the ops is not being added or modified, and it's 2407 * in its normal filter hash, then this must be the one 2408 * we want! 
2409 */ 2410 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) && 2411 hash_contains_ip(ip, op->func_hash)) 2412 return op; 2413 2414 } while_for_each_ftrace_op(op); 2415 2416 return NULL; 2417 } 2418 2419 static struct ftrace_ops * 2420 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec) 2421 { 2422 struct ftrace_ops *op; 2423 unsigned long ip = rec->ip; 2424 2425 do_for_each_ftrace_op(op, ftrace_ops_list) { 2426 /* pass rec in as regs to have non-NULL val */ 2427 if (hash_contains_ip(ip, op->func_hash)) 2428 return op; 2429 } while_for_each_ftrace_op(op); 2430 2431 return NULL; 2432 } 2433 2434 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 2435 /* Protected by rcu_tasks for reading, and direct_mutex for writing */ 2436 static struct ftrace_hash *direct_functions = EMPTY_HASH; 2437 static DEFINE_MUTEX(direct_mutex); 2438 int ftrace_direct_func_count; 2439 2440 /* 2441 * Search the direct_functions hash to see if the given instruction pointer 2442 * has a direct caller attached to it. 2443 */ 2444 unsigned long ftrace_find_rec_direct(unsigned long ip) 2445 { 2446 struct ftrace_func_entry *entry; 2447 2448 entry = __ftrace_lookup_ip(direct_functions, ip); 2449 if (!entry) 2450 return 0; 2451 2452 return entry->direct; 2453 } 2454 2455 static struct ftrace_func_entry* 2456 ftrace_add_rec_direct(unsigned long ip, unsigned long addr, 2457 struct ftrace_hash **free_hash) 2458 { 2459 struct ftrace_func_entry *entry; 2460 2461 if (ftrace_hash_empty(direct_functions) || 2462 direct_functions->count > 2 * (1 << direct_functions->size_bits)) { 2463 struct ftrace_hash *new_hash; 2464 int size = ftrace_hash_empty(direct_functions) ? 0 : 2465 direct_functions->count + 1; 2466 2467 if (size < 32) 2468 size = 32; 2469 2470 new_hash = dup_hash(direct_functions, size); 2471 if (!new_hash) 2472 return NULL; 2473 2474 *free_hash = direct_functions; 2475 direct_functions = new_hash; 2476 } 2477 2478 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 2479 if (!entry) 2480 return NULL; 2481 2482 entry->ip = ip; 2483 entry->direct = addr; 2484 __add_hash_entry(direct_functions, entry); 2485 return entry; 2486 } 2487 2488 static void call_direct_funcs(unsigned long ip, unsigned long pip, 2489 struct ftrace_ops *ops, struct ftrace_regs *fregs) 2490 { 2491 struct pt_regs *regs = ftrace_get_regs(fregs); 2492 unsigned long addr; 2493 2494 addr = ftrace_find_rec_direct(ip); 2495 if (!addr) 2496 return; 2497 2498 arch_ftrace_set_direct_caller(regs, addr); 2499 } 2500 2501 struct ftrace_ops direct_ops = { 2502 .func = call_direct_funcs, 2503 .flags = FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS 2504 | FTRACE_OPS_FL_PERMANENT, 2505 /* 2506 * By declaring the main trampoline as this trampoline 2507 * it will never have one allocated for it. Allocated 2508 * trampolines should not call direct functions. 2509 * The direct_ops should only be called by the builtin 2510 * ftrace_regs_caller trampoline. 2511 */ 2512 .trampoline = FTRACE_REGS_ADDR, 2513 }; 2514 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ 2515 2516 /** 2517 * ftrace_get_addr_new - Get the call address to set to 2518 * @rec: The ftrace record descriptor 2519 * 2520 * If the record has the FTRACE_FL_REGS set, that means that it 2521 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS 2522 * is not set, then it wants to convert to the normal callback. 
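 *
 * Note the precedence, which mirrors the checks in the body below:
 * a single DIRECT user wins, then a custom trampoline, then the
 * regs-saving FTRACE_REGS_ADDR caller, and finally the plain
 * FTRACE_ADDR caller.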
2523 * 2524 * Returns the address of the trampoline to set to 2525 */ 2526 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) 2527 { 2528 struct ftrace_ops *ops; 2529 unsigned long addr; 2530 2531 if ((rec->flags & FTRACE_FL_DIRECT) && 2532 (ftrace_rec_count(rec) == 1)) { 2533 addr = ftrace_find_rec_direct(rec->ip); 2534 if (addr) 2535 return addr; 2536 WARN_ON_ONCE(1); 2537 } 2538 2539 /* Trampolines take precedence over regs */ 2540 if (rec->flags & FTRACE_FL_TRAMP) { 2541 ops = ftrace_find_tramp_ops_new(rec); 2542 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { 2543 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n", 2544 (void *)rec->ip, (void *)rec->ip, rec->flags); 2545 /* Ftrace is shutting down, return anything */ 2546 return (unsigned long)FTRACE_ADDR; 2547 } 2548 return ops->trampoline; 2549 } 2550 2551 if (rec->flags & FTRACE_FL_REGS) 2552 return (unsigned long)FTRACE_REGS_ADDR; 2553 else 2554 return (unsigned long)FTRACE_ADDR; 2555 } 2556 2557 /** 2558 * ftrace_get_addr_curr - Get the call address that is already there 2559 * @rec: The ftrace record descriptor 2560 * 2561 * The FTRACE_FL_REGS_EN is set when the record already points to 2562 * a function that saves all the regs. Basically the '_EN' version 2563 * represents the current state of the function. 2564 * 2565 * Returns the address of the trampoline that is currently being called 2566 */ 2567 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) 2568 { 2569 struct ftrace_ops *ops; 2570 unsigned long addr; 2571 2572 /* Direct calls take precedence over trampolines */ 2573 if (rec->flags & FTRACE_FL_DIRECT_EN) { 2574 addr = ftrace_find_rec_direct(rec->ip); 2575 if (addr) 2576 return addr; 2577 WARN_ON_ONCE(1); 2578 } 2579 2580 /* Trampolines take precedence over regs */ 2581 if (rec->flags & FTRACE_FL_TRAMP_EN) { 2582 ops = ftrace_find_tramp_ops_curr(rec); 2583 if (FTRACE_WARN_ON(!ops)) { 2584 pr_warn("Bad trampoline accounting at: %p (%pS)\n", 2585 (void *)rec->ip, (void *)rec->ip); 2586 /* Ftrace is shutting down, return anything */ 2587 return (unsigned long)FTRACE_ADDR; 2588 } 2589 return ops->trampoline; 2590 } 2591 2592 if (rec->flags & FTRACE_FL_REGS_EN) 2593 return (unsigned long)FTRACE_REGS_ADDR; 2594 else 2595 return (unsigned long)FTRACE_ADDR; 2596 } 2597 2598 static int 2599 __ftrace_replace_code(struct dyn_ftrace *rec, bool enable) 2600 { 2601 unsigned long ftrace_old_addr; 2602 unsigned long ftrace_addr; 2603 int ret; 2604 2605 ftrace_addr = ftrace_get_addr_new(rec); 2606 2607 /* This needs to be done before we call ftrace_update_record */ 2608 ftrace_old_addr = ftrace_get_addr_curr(rec); 2609 2610 ret = ftrace_update_record(rec, enable); 2611 2612 ftrace_bug_type = FTRACE_BUG_UNKNOWN; 2613 2614 switch (ret) { 2615 case FTRACE_UPDATE_IGNORE: 2616 return 0; 2617 2618 case FTRACE_UPDATE_MAKE_CALL: 2619 ftrace_bug_type = FTRACE_BUG_CALL; 2620 return ftrace_make_call(rec, ftrace_addr); 2621 2622 case FTRACE_UPDATE_MAKE_NOP: 2623 ftrace_bug_type = FTRACE_BUG_NOP; 2624 return ftrace_make_nop(NULL, rec, ftrace_old_addr); 2625 2626 case FTRACE_UPDATE_MODIFY_CALL: 2627 ftrace_bug_type = FTRACE_BUG_UPDATE; 2628 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); 2629 } 2630 2631 return -1; /* unknown ftrace bug */ 2632 } 2633 2634 void __weak ftrace_replace_code(int mod_flags) 2635 { 2636 struct dyn_ftrace *rec; 2637 struct ftrace_page *pg; 2638 bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL; 2639 int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL; 2640 int failed; 2641 2642 if 
(unlikely(ftrace_disabled)) 2643 return; 2644 2645 do_for_each_ftrace_rec(pg, rec) { 2646 2647 if (skip_record(rec)) 2648 continue; 2649 2650 failed = __ftrace_replace_code(rec, enable); 2651 if (failed) { 2652 ftrace_bug(failed, rec); 2653 /* Stop processing */ 2654 return; 2655 } 2656 if (schedulable) 2657 cond_resched(); 2658 } while_for_each_ftrace_rec(); 2659 } 2660 2661 struct ftrace_rec_iter { 2662 struct ftrace_page *pg; 2663 int index; 2664 }; 2665 2666 /** 2667 * ftrace_rec_iter_start - start up iterating over traced functions 2668 * 2669 * Returns an iterator handle that is used to iterate over all 2670 * the records that represent address locations where functions 2671 * are traced. 2672 * 2673 * May return NULL if no records are available. 2674 */ 2675 struct ftrace_rec_iter *ftrace_rec_iter_start(void) 2676 { 2677 /* 2678 * We only use a single iterator. 2679 * Protected by the ftrace_lock mutex. 2680 */ 2681 static struct ftrace_rec_iter ftrace_rec_iter; 2682 struct ftrace_rec_iter *iter = &ftrace_rec_iter; 2683 2684 iter->pg = ftrace_pages_start; 2685 iter->index = 0; 2686 2687 /* Could have empty pages */ 2688 while (iter->pg && !iter->pg->index) 2689 iter->pg = iter->pg->next; 2690 2691 if (!iter->pg) 2692 return NULL; 2693 2694 return iter; 2695 } 2696 2697 /** 2698 * ftrace_rec_iter_next - get the next record to process. 2699 * @iter: The handle to the iterator. 2700 * 2701 * Returns the next iterator after the given iterator @iter. 2702 */ 2703 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter) 2704 { 2705 iter->index++; 2706 2707 if (iter->index >= iter->pg->index) { 2708 iter->pg = iter->pg->next; 2709 iter->index = 0; 2710 2711 /* Could have empty pages */ 2712 while (iter->pg && !iter->pg->index) 2713 iter->pg = iter->pg->next; 2714 } 2715 2716 if (!iter->pg) 2717 return NULL; 2718 2719 return iter; 2720 } 2721 2722 /** 2723 * ftrace_rec_iter_record - get the record at the iterator location 2724 * @iter: The current iterator location 2725 * 2726 * Returns the record that the current @iter is at. 2727 */ 2728 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter) 2729 { 2730 return &iter->pg->records[iter->index]; 2731 } 2732 2733 static int 2734 ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec) 2735 { 2736 int ret; 2737 2738 if (unlikely(ftrace_disabled)) 2739 return 0; 2740 2741 ret = ftrace_init_nop(mod, rec); 2742 if (ret) { 2743 ftrace_bug_type = FTRACE_BUG_INIT; 2744 ftrace_bug(ret, rec); 2745 return 0; 2746 } 2747 return 1; 2748 } 2749 2750 /* 2751 * archs can override this function if they must do something 2752 * before the modifying code is performed. 2753 */ 2754 void __weak ftrace_arch_code_modify_prepare(void) 2755 { 2756 } 2757 2758 /* 2759 * archs can override this function if they must do something 2760 * after the modifying code is performed. 
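 *
 * As an illustration (based on current mainline; arch details vary
 * over time): x86 uses the prepare hook to take text_mutex and this
 * post hook to flush its batched text_poke() updates.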
 */
void __weak ftrace_arch_code_modify_post_process(void)
{
}

static int update_ftrace_func(ftrace_func_t func)
{
	static ftrace_func_t save_func;

	/* Avoid updating if it hasn't changed */
	if (func == save_func)
		return 0;

	save_func = func;

	return ftrace_update_ftrace_func(func);
}

void ftrace_modify_all_code(int command)
{
	int update = command & FTRACE_UPDATE_TRACE_FUNC;
	int mod_flags = 0;
	int err = 0;

	if (command & FTRACE_MAY_SLEEP)
		mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;

	/*
	 * If the ftrace_caller calls a ftrace_ops func directly,
	 * we need to make sure that it only traces functions it
	 * expects to trace. When doing the switch of functions,
	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls is set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops have the right functions traced.
	 */
	if (update) {
		err = update_ftrace_func(ftrace_ops_list_func);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(mod_flags);

	if (update && ftrace_trace_function != ftrace_ops_list_func) {
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
		err = update_ftrace_func(ftrace_trace_function);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_START_FUNC_RET)
		err = ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		err = ftrace_disable_ftrace_graph_caller();
	FTRACE_WARN_ON(err);
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}

/**
 * ftrace_run_stop_machine - go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, then
 * it can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

/**
 * arch_ftrace_update_code - modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if they do not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}

static void ftrace_run_update_code(int command)
{
	ftrace_arch_code_modify_prepare();

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do whatever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
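	 *
	 * As an illustration (hedged; arch code changes over time):
	 * arm64 overrides arch_ftrace_update_code() to patch each
	 * site live with FTRACE_MAY_SLEEP set, and x86 batches
	 * text_poke() updates, so neither pays the stop_machine()
	 * cost.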
	 */
	arch_ftrace_update_code(command);

	ftrace_arch_code_modify_post_process();
}

static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
				   struct ftrace_ops_hash *old_hash)
{
	ops->flags |= FTRACE_OPS_FL_MODIFYING;
	ops->old_hash.filter_hash = old_hash->filter_hash;
	ops->old_hash.notrace_hash = old_hash->notrace_hash;
	ftrace_run_update_code(command);
	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;
	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
}

/* List of trace_ops that have allocated trampolines */
static LIST_HEAD(ftrace_ops_trampoline_list);

static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
{
	lockdep_assert_held(&ftrace_lock);
	list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
}

static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
{
	lockdep_assert_held(&ftrace_lock);
	list_del_rcu(&ops->list);
	synchronize_rcu();
}

/*
 * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
 * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
 * not a module.
 */
#define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
#define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"

static void ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
	    ops->trampoline) {
		/*
		 * Record the text poke event before the ksymbol unregister
		 * event.
		 */
		perf_event_text_poke((void *)ops->trampoline,
				     (void *)ops->trampoline,
				     ops->trampoline_size, NULL, 0);
		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
				   ops->trampoline, ops->trampoline_size,
				   true, FTRACE_TRAMPOLINE_SYM);
		/* Remove from kallsyms after the perf events */
		ftrace_remove_trampoline_from_kallsyms(ops);
	}

	arch_ftrace_trampoline_free(ops);
}

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_all(int command)
{
	update_all_ops = true;
	ftrace_startup_enable(command);
	update_all_ops = false;
}

int ftrace_startup(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up++;

	/*
	 * Note that ftrace probes use this to start up
	 * and modify functions they will probe. But we still
	 * set the ADDING flag for modification, as probes
	 * do not have trampolines. If they add them in the
	 * future, then the probes will need to distinguish
	 * between adding and updating probes.
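	 *
	 * For context, a minimal sketch of how a kernel user reaches
	 * this path (my_callback and my_ops are illustrative names
	 * only):
	 *
	 *	static void my_callback(unsigned long ip,
	 *				unsigned long parent_ip,
	 *				struct ftrace_ops *op,
	 *				struct ftrace_regs *fregs)
	 *	{ }
	 *
	 *	static struct ftrace_ops my_ops = { .func = my_callback };
	 *
	 *	register_ftrace_function(&my_ops);
	 *
	 * register_ftrace_function() ends up calling
	 * ftrace_startup(ops, 0) under ftrace_lock.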
	 */
	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;

	ret = ftrace_hash_ipmodify_enable(ops);
	if (ret < 0) {
		/* Rollback registration process */
		__unregister_ftrace_function(ops);
		ftrace_start_up--;
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
			ftrace_trampoline_free(ops);
		return ret;
	}

	if (ftrace_hash_rec_enable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;

	ftrace_startup_enable(command);

	/*
	 * If ftrace is in an undefined state, we just remove ops from list
	 * to prevent the NULL pointer, instead of totally rolling it back and
	 * freeing the trampoline, because those actions could cause further
	 * damage.
	 */
	if (unlikely(ftrace_disabled)) {
		__unregister_ftrace_function(ops);
		return -ENODEV;
	}

	ops->flags &= ~FTRACE_OPS_FL_ADDING;

	return 0;
}

int ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __unregister_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may never be nopped again after
	 * further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	/* Disabling ipmodify never fails */
	ftrace_hash_ipmodify_disable(ops);

	if (ftrace_hash_rec_disable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;

	ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	/*
	 * If the ops uses a trampoline, then it needs to be
	 * tested first on update.
	 */
	ops->flags |= FTRACE_OPS_FL_REMOVING;
	removed_ops = ops;

	/* The trampoline logic checks the old hashes */
	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;

	ftrace_run_update_code(command);

	/*
	 * If there are no more ops registered with ftrace, run a
	 * sanity check to make sure all rec flags are cleared.
	 */
	if (rcu_dereference_protected(ftrace_ops_list,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		struct ftrace_page *pg;
		struct dyn_ftrace *rec;

		do_for_each_ftrace_rec(pg, rec) {
			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
				pr_warn(" %pS flags:%lx\n",
					(void *)rec->ip, rec->flags);
		} while_for_each_ftrace_rec();
	}

	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;

	removed_ops = NULL;
	ops->flags &= ~FTRACE_OPS_FL_REMOVING;

 out:
	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
		/*
		 * We need to do a hard force of sched synchronization.
		 * This is because we use preempt_disable() to do RCU, but
		 * the function tracers can be called where RCU is not watching
		 * (like before user_exit()). We cannot rely on the RCU
		 * infrastructure to do the synchronization, thus we must do it
		 * ourselves.
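		 *
		 * (Roughly, synchronize_rcu_tasks_rude() forces a
		 * context switch on every online CPU, which is what
		 * flushes callers out of those RCU-invisible
		 * windows.)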
3096 */ 3097 synchronize_rcu_tasks_rude(); 3098 3099 /* 3100 * When the kernel is preemptive, tasks can be preempted 3101 * while on a ftrace trampoline. Just scheduling a task on 3102 * a CPU is not good enough to flush them. Calling 3103 * synchronize_rcu_tasks() will wait for those tasks to 3104 * execute and either schedule voluntarily or enter user space. 3105 */ 3106 if (IS_ENABLED(CONFIG_PREEMPTION)) 3107 synchronize_rcu_tasks(); 3108 3109 ftrace_trampoline_free(ops); 3110 } 3111 3112 return 0; 3113 } 3114 3115 static u64 ftrace_update_time; 3116 unsigned long ftrace_update_tot_cnt; 3117 unsigned long ftrace_number_of_pages; 3118 unsigned long ftrace_number_of_groups; 3119 3120 static inline int ops_traces_mod(struct ftrace_ops *ops) 3121 { 3122 /* 3123 * Filter_hash being empty will default to trace module. 3124 * But notrace hash requires a test of individual module functions. 3125 */ 3126 return ftrace_hash_empty(ops->func_hash->filter_hash) && 3127 ftrace_hash_empty(ops->func_hash->notrace_hash); 3128 } 3129 3130 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) 3131 { 3132 bool init_nop = ftrace_need_init_nop(); 3133 struct ftrace_page *pg; 3134 struct dyn_ftrace *p; 3135 u64 start, stop; 3136 unsigned long update_cnt = 0; 3137 unsigned long rec_flags = 0; 3138 int i; 3139 3140 start = ftrace_now(raw_smp_processor_id()); 3141 3142 /* 3143 * When a module is loaded, this function is called to convert 3144 * the calls to mcount in its text to nops, and also to create 3145 * an entry in the ftrace data. Now, if ftrace is activated 3146 * after this call, but before the module sets its text to 3147 * read-only, the modification of enabling ftrace can fail if 3148 * the read-only is done while ftrace is converting the calls. 3149 * To prevent this, the module's records are set as disabled 3150 * and will be enabled after the call to set the module's text 3151 * to read-only. 3152 */ 3153 if (mod) 3154 rec_flags |= FTRACE_FL_DISABLED; 3155 3156 for (pg = new_pgs; pg; pg = pg->next) { 3157 3158 for (i = 0; i < pg->index; i++) { 3159 3160 /* If something went wrong, bail without enabling anything */ 3161 if (unlikely(ftrace_disabled)) 3162 return -1; 3163 3164 p = &pg->records[i]; 3165 p->flags = rec_flags; 3166 3167 /* 3168 * Do the initial record conversion from mcount jump 3169 * to the NOP instructions. 
			 */
			if (init_nop && !ftrace_nop_initialize(mod, p))
				break;

			update_cnt++;
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += update_cnt;

	return 0;
}

static int ftrace_allocate_records(struct ftrace_page *pg, int count)
{
	int order;
	int pages;
	int cnt;

	if (WARN_ON(!count))
		return -EINVAL;

	/* We want to fill as much as possible, with no empty pages */
	pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
	order = fls(pages) - 1;

 again:
	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
		if (!order)
			return -ENOMEM;
		order--;
		goto again;
	}

	ftrace_number_of_pages += 1 << order;
	ftrace_number_of_groups++;

	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
	pg->order = order;

	if (cnt > count)
		cnt = count;

	return cnt;
}

static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	int cnt;

	if (!num_to_init)
		return NULL;

	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return NULL;

	/*
	 * Try to allocate as much as possible in one contiguous
	 * location that fills in all of the space. We want to
	 * waste as little space as possible.
	 */
	for (;;) {
		cnt = ftrace_allocate_records(pg, num_to_init);
		if (cnt < 0)
			goto free_pages;

		num_to_init -= cnt;
		if (!num_to_init)
			break;

		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
		if (!pg->next)
			goto free_pages;

		pg = pg->next;
	}

	return start_pg;

 free_pages:
	pg = start_pg;
	while (pg) {
		if (pg->records) {
			free_pages((unsigned long)pg->records, pg->order);
			ftrace_number_of_pages -= 1 << pg->order;
		}
		start_pg = pg->next;
		kfree(pg);
		pg = start_pg;
		ftrace_number_of_groups--;
	}
	pr_info("ftrace: FAILED to allocate memory for functions\n");
	return NULL;
}

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t				pos;
	loff_t				func_pos;
	loff_t				mod_pos;
	struct ftrace_page		*pg;
	struct dyn_ftrace		*func;
	struct ftrace_func_probe	*probe;
	struct ftrace_func_entry	*probe_entry;
	struct trace_parser		parser;
	struct ftrace_hash		*hash;
	struct ftrace_ops		*ops;
	struct trace_array		*tr;
	struct list_head		*mod_list;
	int				pidx;
	int				idx;
	unsigned			flags;
};

static void *
t_probe_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct trace_array *tr = iter->ops->private;
	struct list_head *func_probes;
	struct ftrace_hash *hash;
	struct list_head *next;
	struct hlist_node *hnd = NULL;
	struct hlist_head *hhd;
	int size;

	(*pos)++;
	iter->pos = *pos;

	if (!tr)
		return NULL;

	func_probes = &tr->func_probes;
	if (list_empty(func_probes))
		return NULL;

	if (!iter->probe) {
		next = func_probes->next;
		iter->probe = list_entry(next, struct ftrace_func_probe, list);
	}

	if (iter->probe_entry)
hnd = &iter->probe_entry->hlist; 3323 3324 hash = iter->probe->ops.func_hash->filter_hash; 3325 3326 /* 3327 * A probe being registered may temporarily have an empty hash 3328 * and it's at the end of the func_probes list. 3329 */ 3330 if (!hash || hash == EMPTY_HASH) 3331 return NULL; 3332 3333 size = 1 << hash->size_bits; 3334 3335 retry: 3336 if (iter->pidx >= size) { 3337 if (iter->probe->list.next == func_probes) 3338 return NULL; 3339 next = iter->probe->list.next; 3340 iter->probe = list_entry(next, struct ftrace_func_probe, list); 3341 hash = iter->probe->ops.func_hash->filter_hash; 3342 size = 1 << hash->size_bits; 3343 iter->pidx = 0; 3344 } 3345 3346 hhd = &hash->buckets[iter->pidx]; 3347 3348 if (hlist_empty(hhd)) { 3349 iter->pidx++; 3350 hnd = NULL; 3351 goto retry; 3352 } 3353 3354 if (!hnd) 3355 hnd = hhd->first; 3356 else { 3357 hnd = hnd->next; 3358 if (!hnd) { 3359 iter->pidx++; 3360 goto retry; 3361 } 3362 } 3363 3364 if (WARN_ON_ONCE(!hnd)) 3365 return NULL; 3366 3367 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist); 3368 3369 return iter; 3370 } 3371 3372 static void *t_probe_start(struct seq_file *m, loff_t *pos) 3373 { 3374 struct ftrace_iterator *iter = m->private; 3375 void *p = NULL; 3376 loff_t l; 3377 3378 if (!(iter->flags & FTRACE_ITER_DO_PROBES)) 3379 return NULL; 3380 3381 if (iter->mod_pos > *pos) 3382 return NULL; 3383 3384 iter->probe = NULL; 3385 iter->probe_entry = NULL; 3386 iter->pidx = 0; 3387 for (l = 0; l <= (*pos - iter->mod_pos); ) { 3388 p = t_probe_next(m, &l); 3389 if (!p) 3390 break; 3391 } 3392 if (!p) 3393 return NULL; 3394 3395 /* Only set this if we have an item */ 3396 iter->flags |= FTRACE_ITER_PROBE; 3397 3398 return iter; 3399 } 3400 3401 static int 3402 t_probe_show(struct seq_file *m, struct ftrace_iterator *iter) 3403 { 3404 struct ftrace_func_entry *probe_entry; 3405 struct ftrace_probe_ops *probe_ops; 3406 struct ftrace_func_probe *probe; 3407 3408 probe = iter->probe; 3409 probe_entry = iter->probe_entry; 3410 3411 if (WARN_ON_ONCE(!probe || !probe_entry)) 3412 return -EIO; 3413 3414 probe_ops = probe->probe_ops; 3415 3416 if (probe_ops->print) 3417 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data); 3418 3419 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, 3420 (void *)probe_ops->func); 3421 3422 return 0; 3423 } 3424 3425 static void * 3426 t_mod_next(struct seq_file *m, loff_t *pos) 3427 { 3428 struct ftrace_iterator *iter = m->private; 3429 struct trace_array *tr = iter->tr; 3430 3431 (*pos)++; 3432 iter->pos = *pos; 3433 3434 iter->mod_list = iter->mod_list->next; 3435 3436 if (iter->mod_list == &tr->mod_trace || 3437 iter->mod_list == &tr->mod_notrace) { 3438 iter->flags &= ~FTRACE_ITER_MOD; 3439 return NULL; 3440 } 3441 3442 iter->mod_pos = *pos; 3443 3444 return iter; 3445 } 3446 3447 static void *t_mod_start(struct seq_file *m, loff_t *pos) 3448 { 3449 struct ftrace_iterator *iter = m->private; 3450 void *p = NULL; 3451 loff_t l; 3452 3453 if (iter->func_pos > *pos) 3454 return NULL; 3455 3456 iter->mod_pos = iter->func_pos; 3457 3458 /* probes are only available if tr is set */ 3459 if (!iter->tr) 3460 return NULL; 3461 3462 for (l = 0; l <= (*pos - iter->func_pos); ) { 3463 p = t_mod_next(m, &l); 3464 if (!p) 3465 break; 3466 } 3467 if (!p) { 3468 iter->flags &= ~FTRACE_ITER_MOD; 3469 return t_probe_start(m, pos); 3470 } 3471 3472 /* Only set this if we have an item */ 3473 iter->flags |= FTRACE_ITER_MOD; 3474 3475 return iter; 3476 } 3477 3478 static int 3479 
t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
{
	struct ftrace_mod_load *ftrace_mod;
	struct trace_array *tr = iter->tr;

	if (WARN_ON_ONCE(!iter->mod_list) ||
	    iter->mod_list == &tr->mod_trace ||
	    iter->mod_list == &tr->mod_notrace)
		return -EIO;

	ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);

	if (ftrace_mod->func)
		seq_printf(m, "%s", ftrace_mod->func);
	else
		seq_putc(m, '*');

	seq_printf(m, ":mod:%s\n", ftrace_mod->module);

	return 0;
}

static void *
t_func_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
		     !ftrace_lookup_ip(iter->hash, rec->ip)) ||

		    ((iter->flags & FTRACE_ITER_ENABLED) &&
		     !(rec->flags & FTRACE_FL_ENABLED))) {

			rec = NULL;
			goto retry;
		}
	}

	if (!rec)
		return NULL;

	iter->pos = iter->func_pos = *pos;
	iter->func = rec;

	return iter;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	loff_t l = *pos; /* t_probe_start() must use original pos */
	void *ret;

	if (unlikely(ftrace_disabled))
		return NULL;

	if (iter->flags & FTRACE_ITER_PROBE)
		return t_probe_next(m, pos);

	if (iter->flags & FTRACE_ITER_MOD)
		return t_mod_next(m, pos);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		/* next must increment pos, and t_probe_start does not */
		(*pos)++;
		return t_mod_start(m, &l);
	}

	ret = t_func_next(m, pos);

	if (!ret)
		return t_mod_start(m, &l);

	return ret;
}

static void reset_iter_read(struct ftrace_iterator *iter)
{
	iter->pos = 0;
	iter->func_pos = 0;
	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		return NULL;

	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
	    ftrace_hash_empty(iter->hash)) {
		iter->func_pos = 1; /* Account for the message */
		if (*pos > 0)
			return t_mod_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_PROBE;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_MOD)
		return t_mod_start(m, pos);

	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_lock. This is because
	 * those pointers can change without the lock.
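	 *
	 * This makes each t_start() call O(*pos), but these tracefs
	 * files are read rarely and almost always from the start, so
	 * the linear rescan is an acceptable trade for being able to
	 * drop the lock between reads.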
3615 */ 3616 iter->pg = ftrace_pages_start; 3617 iter->idx = 0; 3618 for (l = 0; l <= *pos; ) { 3619 p = t_func_next(m, &l); 3620 if (!p) 3621 break; 3622 } 3623 3624 if (!p) 3625 return t_mod_start(m, pos); 3626 3627 return iter; 3628 } 3629 3630 static void t_stop(struct seq_file *m, void *p) 3631 { 3632 mutex_unlock(&ftrace_lock); 3633 } 3634 3635 void * __weak 3636 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) 3637 { 3638 return NULL; 3639 } 3640 3641 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops, 3642 struct dyn_ftrace *rec) 3643 { 3644 void *ptr; 3645 3646 ptr = arch_ftrace_trampoline_func(ops, rec); 3647 if (ptr) 3648 seq_printf(m, " ->%pS", ptr); 3649 } 3650 3651 #ifdef FTRACE_MCOUNT_MAX_OFFSET 3652 /* 3653 * Weak functions can still have an mcount/fentry that is saved in 3654 * the __mcount_loc section. These can be detected by having a 3655 * symbol offset of greater than FTRACE_MCOUNT_MAX_OFFSET, as the 3656 * symbol found by kallsyms is not the function that the mcount/fentry 3657 * is part of. The offset is much greater in these cases. 3658 * 3659 * Test the record to make sure that the ip points to a valid kallsyms 3660 * and if not, mark it disabled. 3661 */ 3662 static int test_for_valid_rec(struct dyn_ftrace *rec) 3663 { 3664 char str[KSYM_SYMBOL_LEN]; 3665 unsigned long offset; 3666 const char *ret; 3667 3668 ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str); 3669 3670 /* Weak functions can cause invalid addresses */ 3671 if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) { 3672 rec->flags |= FTRACE_FL_DISABLED; 3673 return 0; 3674 } 3675 return 1; 3676 } 3677 3678 static struct workqueue_struct *ftrace_check_wq __initdata; 3679 static struct work_struct ftrace_check_work __initdata; 3680 3681 /* 3682 * Scan all the mcount/fentry entries to make sure they are valid. 3683 */ 3684 static __init void ftrace_check_work_func(struct work_struct *work) 3685 { 3686 struct ftrace_page *pg; 3687 struct dyn_ftrace *rec; 3688 3689 mutex_lock(&ftrace_lock); 3690 do_for_each_ftrace_rec(pg, rec) { 3691 test_for_valid_rec(rec); 3692 } while_for_each_ftrace_rec(); 3693 mutex_unlock(&ftrace_lock); 3694 } 3695 3696 static int __init ftrace_check_for_weak_functions(void) 3697 { 3698 INIT_WORK(&ftrace_check_work, ftrace_check_work_func); 3699 3700 ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0); 3701 3702 queue_work(ftrace_check_wq, &ftrace_check_work); 3703 return 0; 3704 } 3705 3706 static int __init ftrace_check_sync(void) 3707 { 3708 /* Make sure the ftrace_check updates are finished */ 3709 if (ftrace_check_wq) 3710 destroy_workqueue(ftrace_check_wq); 3711 return 0; 3712 } 3713 3714 late_initcall_sync(ftrace_check_sync); 3715 subsys_initcall(ftrace_check_for_weak_functions); 3716 3717 static int print_rec(struct seq_file *m, unsigned long ip) 3718 { 3719 unsigned long offset; 3720 char str[KSYM_SYMBOL_LEN]; 3721 char *modname; 3722 const char *ret; 3723 3724 ret = kallsyms_lookup(ip, NULL, &offset, &modname, str); 3725 /* Weak functions can cause invalid addresses */ 3726 if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) { 3727 snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld", 3728 FTRACE_INVALID_FUNCTION, offset); 3729 ret = NULL; 3730 } 3731 3732 seq_puts(m, str); 3733 if (modname) 3734 seq_printf(m, " [%s]", modname); 3735 return ret == NULL ? 
-1 : 0; 3736 } 3737 #else 3738 static inline int test_for_valid_rec(struct dyn_ftrace *rec) 3739 { 3740 return 1; 3741 } 3742 3743 static inline int print_rec(struct seq_file *m, unsigned long ip) 3744 { 3745 seq_printf(m, "%ps", (void *)ip); 3746 return 0; 3747 } 3748 #endif 3749 3750 static int t_show(struct seq_file *m, void *v) 3751 { 3752 struct ftrace_iterator *iter = m->private; 3753 struct dyn_ftrace *rec; 3754 3755 if (iter->flags & FTRACE_ITER_PROBE) 3756 return t_probe_show(m, iter); 3757 3758 if (iter->flags & FTRACE_ITER_MOD) 3759 return t_mod_show(m, iter); 3760 3761 if (iter->flags & FTRACE_ITER_PRINTALL) { 3762 if (iter->flags & FTRACE_ITER_NOTRACE) 3763 seq_puts(m, "#### no functions disabled ####\n"); 3764 else 3765 seq_puts(m, "#### all functions enabled ####\n"); 3766 return 0; 3767 } 3768 3769 rec = iter->func; 3770 3771 if (!rec) 3772 return 0; 3773 3774 if (print_rec(m, rec->ip)) { 3775 /* This should only happen when a rec is disabled */ 3776 WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED)); 3777 seq_putc(m, '\n'); 3778 return 0; 3779 } 3780 3781 if (iter->flags & FTRACE_ITER_ENABLED) { 3782 struct ftrace_ops *ops; 3783 3784 seq_printf(m, " (%ld)%s%s%s", 3785 ftrace_rec_count(rec), 3786 rec->flags & FTRACE_FL_REGS ? " R" : " ", 3787 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ", 3788 rec->flags & FTRACE_FL_DIRECT ? " D" : " "); 3789 if (rec->flags & FTRACE_FL_TRAMP_EN) { 3790 ops = ftrace_find_tramp_ops_any(rec); 3791 if (ops) { 3792 do { 3793 seq_printf(m, "\ttramp: %pS (%pS)", 3794 (void *)ops->trampoline, 3795 (void *)ops->func); 3796 add_trampoline_func(m, ops, rec); 3797 ops = ftrace_find_tramp_ops_next(rec, ops); 3798 } while (ops); 3799 } else 3800 seq_puts(m, "\ttramp: ERROR!"); 3801 } else { 3802 add_trampoline_func(m, NULL, rec); 3803 } 3804 if (rec->flags & FTRACE_FL_DIRECT) { 3805 unsigned long direct; 3806 3807 direct = ftrace_find_rec_direct(rec->ip); 3808 if (direct) 3809 seq_printf(m, "\n\tdirect-->%pS", (void *)direct); 3810 } 3811 } 3812 3813 seq_putc(m, '\n'); 3814 3815 return 0; 3816 } 3817 3818 static const struct seq_operations show_ftrace_seq_ops = { 3819 .start = t_start, 3820 .next = t_next, 3821 .stop = t_stop, 3822 .show = t_show, 3823 }; 3824 3825 static int 3826 ftrace_avail_open(struct inode *inode, struct file *file) 3827 { 3828 struct ftrace_iterator *iter; 3829 int ret; 3830 3831 ret = security_locked_down(LOCKDOWN_TRACEFS); 3832 if (ret) 3833 return ret; 3834 3835 if (unlikely(ftrace_disabled)) 3836 return -ENODEV; 3837 3838 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 3839 if (!iter) 3840 return -ENOMEM; 3841 3842 iter->pg = ftrace_pages_start; 3843 iter->ops = &global_ops; 3844 3845 return 0; 3846 } 3847 3848 static int 3849 ftrace_enabled_open(struct inode *inode, struct file *file) 3850 { 3851 struct ftrace_iterator *iter; 3852 3853 /* 3854 * This shows us what functions are currently being 3855 * traced and by what. Not sure if we want lockdown 3856 * to hide such critical information for an admin. 3857 * Although, perhaps it can show information we don't 3858 * want people to see, but if something is tracing 3859 * something, we probably want to know about it. 
3860 */ 3861 3862 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 3863 if (!iter) 3864 return -ENOMEM; 3865 3866 iter->pg = ftrace_pages_start; 3867 iter->flags = FTRACE_ITER_ENABLED; 3868 iter->ops = &global_ops; 3869 3870 return 0; 3871 } 3872 3873 /** 3874 * ftrace_regex_open - initialize function tracer filter files 3875 * @ops: The ftrace_ops that hold the hash filters 3876 * @flag: The type of filter to process 3877 * @inode: The inode, usually passed in to your open routine 3878 * @file: The file, usually passed in to your open routine 3879 * 3880 * ftrace_regex_open() initializes the filter files for the 3881 * @ops. Depending on @flag it may process the filter hash or 3882 * the notrace hash of @ops. With this called from the open 3883 * routine, you can use ftrace_filter_write() for the write 3884 * routine if @flag has FTRACE_ITER_FILTER set, or 3885 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. 3886 * tracing_lseek() should be used as the lseek routine, and 3887 * release must call ftrace_regex_release(). 3888 */ 3889 int 3890 ftrace_regex_open(struct ftrace_ops *ops, int flag, 3891 struct inode *inode, struct file *file) 3892 { 3893 struct ftrace_iterator *iter; 3894 struct ftrace_hash *hash; 3895 struct list_head *mod_head; 3896 struct trace_array *tr = ops->private; 3897 int ret = -ENOMEM; 3898 3899 ftrace_ops_init(ops); 3900 3901 if (unlikely(ftrace_disabled)) 3902 return -ENODEV; 3903 3904 if (tracing_check_open_get_tr(tr)) 3905 return -ENODEV; 3906 3907 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 3908 if (!iter) 3909 goto out; 3910 3911 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) 3912 goto out; 3913 3914 iter->ops = ops; 3915 iter->flags = flag; 3916 iter->tr = tr; 3917 3918 mutex_lock(&ops->func_hash->regex_lock); 3919 3920 if (flag & FTRACE_ITER_NOTRACE) { 3921 hash = ops->func_hash->notrace_hash; 3922 mod_head = tr ? &tr->mod_notrace : NULL; 3923 } else { 3924 hash = ops->func_hash->filter_hash; 3925 mod_head = tr ? 
&tr->mod_trace : NULL; 3926 } 3927 3928 iter->mod_list = mod_head; 3929 3930 if (file->f_mode & FMODE_WRITE) { 3931 const int size_bits = FTRACE_HASH_DEFAULT_BITS; 3932 3933 if (file->f_flags & O_TRUNC) { 3934 iter->hash = alloc_ftrace_hash(size_bits); 3935 clear_ftrace_mod_list(mod_head); 3936 } else { 3937 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); 3938 } 3939 3940 if (!iter->hash) { 3941 trace_parser_put(&iter->parser); 3942 goto out_unlock; 3943 } 3944 } else 3945 iter->hash = hash; 3946 3947 ret = 0; 3948 3949 if (file->f_mode & FMODE_READ) { 3950 iter->pg = ftrace_pages_start; 3951 3952 ret = seq_open(file, &show_ftrace_seq_ops); 3953 if (!ret) { 3954 struct seq_file *m = file->private_data; 3955 m->private = iter; 3956 } else { 3957 /* Failed */ 3958 free_ftrace_hash(iter->hash); 3959 trace_parser_put(&iter->parser); 3960 } 3961 } else 3962 file->private_data = iter; 3963 3964 out_unlock: 3965 mutex_unlock(&ops->func_hash->regex_lock); 3966 3967 out: 3968 if (ret) { 3969 kfree(iter); 3970 if (tr) 3971 trace_array_put(tr); 3972 } 3973 3974 return ret; 3975 } 3976 3977 static int 3978 ftrace_filter_open(struct inode *inode, struct file *file) 3979 { 3980 struct ftrace_ops *ops = inode->i_private; 3981 3982 /* Checks for tracefs lockdown */ 3983 return ftrace_regex_open(ops, 3984 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES, 3985 inode, file); 3986 } 3987 3988 static int 3989 ftrace_notrace_open(struct inode *inode, struct file *file) 3990 { 3991 struct ftrace_ops *ops = inode->i_private; 3992 3993 /* Checks for tracefs lockdown */ 3994 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, 3995 inode, file); 3996 } 3997 3998 /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */ 3999 struct ftrace_glob { 4000 char *search; 4001 unsigned len; 4002 int type; 4003 }; 4004 4005 /* 4006 * If symbols in an architecture don't correspond exactly to the user-visible 4007 * name of what they represent, it is possible to define this function to 4008 * perform the necessary adjustments. 
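 *
 * A minimal sketch of such an override (illustrative only; for
 * example, an arch whose kallsyms names carry a leading '.' that
 * users never type could do):
 *
 *	char *arch_ftrace_match_adjust(char *str, const char *search)
 *	{
 *		if (str[0] == '.' && search[0] != '.')
 *			return str + 1;
 *		return str;
 *	}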
4009 */ 4010 char * __weak arch_ftrace_match_adjust(char *str, const char *search) 4011 { 4012 return str; 4013 } 4014 4015 static int ftrace_match(char *str, struct ftrace_glob *g) 4016 { 4017 int matched = 0; 4018 int slen; 4019 4020 str = arch_ftrace_match_adjust(str, g->search); 4021 4022 switch (g->type) { 4023 case MATCH_FULL: 4024 if (strcmp(str, g->search) == 0) 4025 matched = 1; 4026 break; 4027 case MATCH_FRONT_ONLY: 4028 if (strncmp(str, g->search, g->len) == 0) 4029 matched = 1; 4030 break; 4031 case MATCH_MIDDLE_ONLY: 4032 if (strstr(str, g->search)) 4033 matched = 1; 4034 break; 4035 case MATCH_END_ONLY: 4036 slen = strlen(str); 4037 if (slen >= g->len && 4038 memcmp(str + slen - g->len, g->search, g->len) == 0) 4039 matched = 1; 4040 break; 4041 case MATCH_GLOB: 4042 if (glob_match(g->search, str)) 4043 matched = 1; 4044 break; 4045 } 4046 4047 return matched; 4048 } 4049 4050 static int 4051 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter) 4052 { 4053 struct ftrace_func_entry *entry; 4054 int ret = 0; 4055 4056 entry = ftrace_lookup_ip(hash, rec->ip); 4057 if (clear_filter) { 4058 /* Do nothing if it doesn't exist */ 4059 if (!entry) 4060 return 0; 4061 4062 free_hash_entry(hash, entry); 4063 } else { 4064 /* Do nothing if it exists */ 4065 if (entry) 4066 return 0; 4067 4068 ret = add_hash_entry(hash, rec->ip); 4069 } 4070 return ret; 4071 } 4072 4073 static int 4074 add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g, 4075 int clear_filter) 4076 { 4077 long index = simple_strtoul(func_g->search, NULL, 0); 4078 struct ftrace_page *pg; 4079 struct dyn_ftrace *rec; 4080 4081 /* The index starts at 1 */ 4082 if (--index < 0) 4083 return 0; 4084 4085 do_for_each_ftrace_rec(pg, rec) { 4086 if (pg->index <= index) { 4087 index -= pg->index; 4088 /* this is a double loop, break goes to the next page */ 4089 break; 4090 } 4091 rec = &pg->records[index]; 4092 enter_record(hash, rec, clear_filter); 4093 return 1; 4094 } while_for_each_ftrace_rec(); 4095 return 0; 4096 } 4097 4098 #ifdef FTRACE_MCOUNT_MAX_OFFSET 4099 static int lookup_ip(unsigned long ip, char **modname, char *str) 4100 { 4101 unsigned long offset; 4102 4103 kallsyms_lookup(ip, NULL, &offset, modname, str); 4104 if (offset > FTRACE_MCOUNT_MAX_OFFSET) 4105 return -1; 4106 return 0; 4107 } 4108 #else 4109 static int lookup_ip(unsigned long ip, char **modname, char *str) 4110 { 4111 kallsyms_lookup(ip, NULL, NULL, modname, str); 4112 return 0; 4113 } 4114 #endif 4115 4116 static int 4117 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g, 4118 struct ftrace_glob *mod_g, int exclude_mod) 4119 { 4120 char str[KSYM_SYMBOL_LEN]; 4121 char *modname; 4122 4123 if (lookup_ip(rec->ip, &modname, str)) { 4124 /* This should only happen when a rec is disabled */ 4125 WARN_ON_ONCE(system_state == SYSTEM_RUNNING && 4126 !(rec->flags & FTRACE_FL_DISABLED)); 4127 return 0; 4128 } 4129 4130 if (mod_g) { 4131 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0; 4132 4133 /* blank module name to match all modules */ 4134 if (!mod_g->len) { 4135 /* blank module globbing: modname xor exclude_mod */ 4136 if (!exclude_mod != !modname) 4137 goto func_match; 4138 return 0; 4139 } 4140 4141 /* 4142 * exclude_mod is set to trace everything but the given 4143 * module. If it is set and the module matches, then 4144 * return 0. If it is not set, and the module doesn't match 4145 * also return 0. Otherwise, check the function to see if 4146 * that matches. 
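		 *
		 * As a truth table (matching the test just below):
		 *
		 *	mod_matches	exclude_mod	result
		 *	yes		no		check the function
		 *	yes		yes		reject (return 0)
		 *	no		no		reject (return 0)
		 *	no		yes		check the function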
4147 */ 4148 if (!mod_matches == !exclude_mod) 4149 return 0; 4150 func_match: 4151 /* blank search means to match all funcs in the mod */ 4152 if (!func_g->len) 4153 return 1; 4154 } 4155 4156 return ftrace_match(str, func_g); 4157 } 4158 4159 static int 4160 match_records(struct ftrace_hash *hash, char *func, int len, char *mod) 4161 { 4162 struct ftrace_page *pg; 4163 struct dyn_ftrace *rec; 4164 struct ftrace_glob func_g = { .type = MATCH_FULL }; 4165 struct ftrace_glob mod_g = { .type = MATCH_FULL }; 4166 struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL; 4167 int exclude_mod = 0; 4168 int found = 0; 4169 int ret; 4170 int clear_filter = 0; 4171 4172 if (func) { 4173 func_g.type = filter_parse_regex(func, len, &func_g.search, 4174 &clear_filter); 4175 func_g.len = strlen(func_g.search); 4176 } 4177 4178 if (mod) { 4179 mod_g.type = filter_parse_regex(mod, strlen(mod), 4180 &mod_g.search, &exclude_mod); 4181 mod_g.len = strlen(mod_g.search); 4182 } 4183 4184 mutex_lock(&ftrace_lock); 4185 4186 if (unlikely(ftrace_disabled)) 4187 goto out_unlock; 4188 4189 if (func_g.type == MATCH_INDEX) { 4190 found = add_rec_by_index(hash, &func_g, clear_filter); 4191 goto out_unlock; 4192 } 4193 4194 do_for_each_ftrace_rec(pg, rec) { 4195 4196 if (rec->flags & FTRACE_FL_DISABLED) 4197 continue; 4198 4199 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { 4200 ret = enter_record(hash, rec, clear_filter); 4201 if (ret < 0) { 4202 found = ret; 4203 goto out_unlock; 4204 } 4205 found = 1; 4206 } 4207 cond_resched(); 4208 } while_for_each_ftrace_rec(); 4209 out_unlock: 4210 mutex_unlock(&ftrace_lock); 4211 4212 return found; 4213 } 4214 4215 static int 4216 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) 4217 { 4218 return match_records(hash, buff, len, NULL); 4219 } 4220 4221 static void ftrace_ops_update_code(struct ftrace_ops *ops, 4222 struct ftrace_ops_hash *old_hash) 4223 { 4224 struct ftrace_ops *op; 4225 4226 if (!ftrace_enabled) 4227 return; 4228 4229 if (ops->flags & FTRACE_OPS_FL_ENABLED) { 4230 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); 4231 return; 4232 } 4233 4234 /* 4235 * If this is the shared global_ops filter, then we need to 4236 * check if there is another ops that shares it, is enabled. 4237 * If so, we still need to run the modify code. 
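	 *
	 * This happens, for example, when set_ftrace_filter is
	 * written while the function tracer itself is off but
	 * another user of the shared hash (such as the function
	 * graph tracer, whose ops uses global_ops.local_hash) is
	 * still active.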
4238 */ 4239 if (ops->func_hash != &global_ops.local_hash) 4240 return; 4241 4242 do_for_each_ftrace_op(op, ftrace_ops_list) { 4243 if (op->func_hash == &global_ops.local_hash && 4244 op->flags & FTRACE_OPS_FL_ENABLED) { 4245 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); 4246 /* Only need to do this once */ 4247 return; 4248 } 4249 } while_for_each_ftrace_op(op); 4250 } 4251 4252 static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, 4253 struct ftrace_hash **orig_hash, 4254 struct ftrace_hash *hash, 4255 int enable) 4256 { 4257 struct ftrace_ops_hash old_hash_ops; 4258 struct ftrace_hash *old_hash; 4259 int ret; 4260 4261 old_hash = *orig_hash; 4262 old_hash_ops.filter_hash = ops->func_hash->filter_hash; 4263 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; 4264 ret = ftrace_hash_move(ops, enable, orig_hash, hash); 4265 if (!ret) { 4266 ftrace_ops_update_code(ops, &old_hash_ops); 4267 free_ftrace_hash_rcu(old_hash); 4268 } 4269 return ret; 4270 } 4271 4272 static bool module_exists(const char *module) 4273 { 4274 /* All modules have the symbol __this_module */ 4275 static const char this_mod[] = "__this_module"; 4276 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2]; 4277 unsigned long val; 4278 int n; 4279 4280 n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod); 4281 4282 if (n > sizeof(modname) - 1) 4283 return false; 4284 4285 val = module_kallsyms_lookup_name(modname); 4286 return val != 0; 4287 } 4288 4289 static int cache_mod(struct trace_array *tr, 4290 const char *func, char *module, int enable) 4291 { 4292 struct ftrace_mod_load *ftrace_mod, *n; 4293 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace; 4294 int ret; 4295 4296 mutex_lock(&ftrace_lock); 4297 4298 /* We do not cache inverse filters */ 4299 if (func[0] == '!') { 4300 func++; 4301 ret = -EINVAL; 4302 4303 /* Look to remove this hash */ 4304 list_for_each_entry_safe(ftrace_mod, n, head, list) { 4305 if (strcmp(ftrace_mod->module, module) != 0) 4306 continue; 4307 4308 /* no func matches all */ 4309 if (strcmp(func, "*") == 0 || 4310 (ftrace_mod->func && 4311 strcmp(ftrace_mod->func, func) == 0)) { 4312 ret = 0; 4313 free_ftrace_mod(ftrace_mod); 4314 continue; 4315 } 4316 } 4317 goto out; 4318 } 4319 4320 ret = -EINVAL; 4321 /* We only care about modules that have not been loaded yet */ 4322 if (module_exists(module)) 4323 goto out; 4324 4325 /* Save this string off, and execute it when the module is loaded */ 4326 ret = ftrace_add_mod(tr, func, module, enable); 4327 out: 4328 mutex_unlock(&ftrace_lock); 4329 4330 return ret; 4331 } 4332 4333 static int 4334 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, 4335 int reset, int enable); 4336 4337 #ifdef CONFIG_MODULES 4338 static void process_mod_list(struct list_head *head, struct ftrace_ops *ops, 4339 char *mod, bool enable) 4340 { 4341 struct ftrace_mod_load *ftrace_mod, *n; 4342 struct ftrace_hash **orig_hash, *new_hash; 4343 LIST_HEAD(process_mods); 4344 char *func; 4345 4346 mutex_lock(&ops->func_hash->regex_lock); 4347 4348 if (enable) 4349 orig_hash = &ops->func_hash->filter_hash; 4350 else 4351 orig_hash = &ops->func_hash->notrace_hash; 4352 4353 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, 4354 *orig_hash); 4355 if (!new_hash) 4356 goto out; /* warn? 
*/ 4357 4358 mutex_lock(&ftrace_lock); 4359 4360 list_for_each_entry_safe(ftrace_mod, n, head, list) { 4361 4362 if (strcmp(ftrace_mod->module, mod) != 0) 4363 continue; 4364 4365 if (ftrace_mod->func) 4366 func = kstrdup(ftrace_mod->func, GFP_KERNEL); 4367 else 4368 func = kstrdup("*", GFP_KERNEL); 4369 4370 if (!func) /* warn? */ 4371 continue; 4372 4373 list_move(&ftrace_mod->list, &process_mods); 4374 4375 /* Use the newly allocated func, as it may be "*" */ 4376 kfree(ftrace_mod->func); 4377 ftrace_mod->func = func; 4378 } 4379 4380 mutex_unlock(&ftrace_lock); 4381 4382 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) { 4383 4384 func = ftrace_mod->func; 4385 4386 /* Grabs ftrace_lock, which is why we have this extra step */ 4387 match_records(new_hash, func, strlen(func), mod); 4388 free_ftrace_mod(ftrace_mod); 4389 } 4390 4391 if (enable && list_empty(head)) 4392 new_hash->flags &= ~FTRACE_HASH_FL_MOD; 4393 4394 mutex_lock(&ftrace_lock); 4395 4396 ftrace_hash_move_and_update_ops(ops, orig_hash, 4397 new_hash, enable); 4398 mutex_unlock(&ftrace_lock); 4399 4400 out: 4401 mutex_unlock(&ops->func_hash->regex_lock); 4402 4403 free_ftrace_hash(new_hash); 4404 } 4405 4406 static void process_cached_mods(const char *mod_name) 4407 { 4408 struct trace_array *tr; 4409 char *mod; 4410 4411 mod = kstrdup(mod_name, GFP_KERNEL); 4412 if (!mod) 4413 return; 4414 4415 mutex_lock(&trace_types_lock); 4416 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 4417 if (!list_empty(&tr->mod_trace)) 4418 process_mod_list(&tr->mod_trace, tr->ops, mod, true); 4419 if (!list_empty(&tr->mod_notrace)) 4420 process_mod_list(&tr->mod_notrace, tr->ops, mod, false); 4421 } 4422 mutex_unlock(&trace_types_lock); 4423 4424 kfree(mod); 4425 } 4426 #endif 4427 4428 /* 4429 * We register the module command as a template to show others how 4430 * to register the a command as well. 4431 */ 4432 4433 static int 4434 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, 4435 char *func_orig, char *cmd, char *module, int enable) 4436 { 4437 char *func; 4438 int ret; 4439 4440 /* match_records() modifies func, and we need the original */ 4441 func = kstrdup(func_orig, GFP_KERNEL); 4442 if (!func) 4443 return -ENOMEM; 4444 4445 /* 4446 * cmd == 'mod' because we only registered this func 4447 * for the 'mod' ftrace_func_command. 4448 * But if you register one func with multiple commands, 4449 * you can tell which command was used by the cmd 4450 * parameter. 4451 */ 4452 ret = match_records(hash, func, strlen(func), module); 4453 kfree(func); 4454 4455 if (!ret) 4456 return cache_mod(tr, func_orig, module, enable); 4457 if (ret < 0) 4458 return ret; 4459 return 0; 4460 } 4461 4462 static struct ftrace_func_command ftrace_mod_cmd = { 4463 .name = "mod", 4464 .func = ftrace_mod_callback, 4465 }; 4466 4467 static int __init ftrace_mod_cmd_init(void) 4468 { 4469 return register_ftrace_command(&ftrace_mod_cmd); 4470 } 4471 core_initcall(ftrace_mod_cmd_init); 4472 4473 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, 4474 struct ftrace_ops *op, struct ftrace_regs *fregs) 4475 { 4476 struct ftrace_probe_ops *probe_ops; 4477 struct ftrace_func_probe *probe; 4478 4479 probe = container_of(op, struct ftrace_func_probe, ops); 4480 probe_ops = probe->probe_ops; 4481 4482 /* 4483 * Disable preemption for these calls to prevent a RCU grace 4484 * period. This syncs the hash iteration and freeing of items 4485 * on the hash. rcu_read_lock is too dangerous here. 
4486  */
4487 	preempt_disable_notrace();
4488 	probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
4489 	preempt_enable_notrace();
4490 }
4491 
4492 struct ftrace_func_map {
4493 	struct ftrace_func_entry	entry;
4494 	void				*data;
4495 };
4496 
4497 struct ftrace_func_mapper {
4498 	struct ftrace_hash		hash;
4499 };
4500 
4501 /**
4502  * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
4503  *
4504  * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
4505  */
4506 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
4507 {
4508 	struct ftrace_hash *hash;
4509 
4510 	/*
4511 	 * The mapper is simply a ftrace_hash, but since the entries
4512 	 * in the hash are not ftrace_func_entry type, we define it
4513 	 * as a separate structure.
4514 	 */
4515 	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4516 	return (struct ftrace_func_mapper *)hash;
4517 }
4518 
4519 /**
4520  * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4521  * @mapper: The mapper that has the ip maps
4522  * @ip: the instruction pointer to find the data for
4523  *
4524  * Returns the data mapped to @ip if found, otherwise NULL. The return
4525  * is actually the address of the mapper data pointer. The address is
4526  * returned for use cases where the data is no bigger than a long, and
4527  * the user can use the data pointer as its data instead of having to
4528  * allocate more memory for the reference.
4529  */
4530 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4531 				  unsigned long ip)
4532 {
4533 	struct ftrace_func_entry *entry;
4534 	struct ftrace_func_map *map;
4535 
4536 	entry = ftrace_lookup_ip(&mapper->hash, ip);
4537 	if (!entry)
4538 		return NULL;
4539 
4540 	map = (struct ftrace_func_map *)entry;
4541 	return &map->data;
4542 }
4543 
4544 /**
4545  * ftrace_func_mapper_add_ip - Map some data to an ip
4546  * @mapper: The mapper that has the ip maps
4547  * @ip: The instruction pointer address to map @data to
4548  * @data: The data to map to @ip
4549  *
4550  * Returns 0 on success, otherwise an error.
4551  */
4552 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4553 			      unsigned long ip, void *data)
4554 {
4555 	struct ftrace_func_entry *entry;
4556 	struct ftrace_func_map *map;
4557 
4558 	entry = ftrace_lookup_ip(&mapper->hash, ip);
4559 	if (entry)
4560 		return -EBUSY;
4561 
4562 	map = kmalloc(sizeof(*map), GFP_KERNEL);
4563 	if (!map)
4564 		return -ENOMEM;
4565 
4566 	map->entry.ip = ip;
4567 	map->data = data;
4568 
4569 	__add_hash_entry(&mapper->hash, &map->entry);
4570 
4571 	return 0;
4572 }
4573 
4574 /**
4575  * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4576  * @mapper: The mapper that has the ip maps
4577  * @ip: The instruction pointer address to remove the data from
4578  *
4579  * Returns the data if it is found, otherwise NULL.
4580  * Note, if the data pointer is used as the data itself (see
4581  * ftrace_func_mapper_find_ip()), then the return value may be meaningless
4582  * if the data pointer was set to zero.
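 *
 * A minimal usage sketch of the mapper API (ip and my_data are
 * illustrative values, not kernel symbols):
 *
 *	struct ftrace_func_mapper *mapper;
 *	void *data;
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	if (mapper)
 *		ftrace_func_mapper_add_ip(mapper, ip, my_data);
 *	...
 *	data = ftrace_func_mapper_remove_ip(mapper, ip);
 *	free_ftrace_func_mapper(mapper, NULL);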
4583 */ 4584 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper, 4585 unsigned long ip) 4586 { 4587 struct ftrace_func_entry *entry; 4588 struct ftrace_func_map *map; 4589 void *data; 4590 4591 entry = ftrace_lookup_ip(&mapper->hash, ip); 4592 if (!entry) 4593 return NULL; 4594 4595 map = (struct ftrace_func_map *)entry; 4596 data = map->data; 4597 4598 remove_hash_entry(&mapper->hash, entry); 4599 kfree(entry); 4600 4601 return data; 4602 } 4603 4604 /** 4605 * free_ftrace_func_mapper - free a mapping of ips and data 4606 * @mapper: The mapper that has the ip maps 4607 * @free_func: A function to be called on each data item. 4608 * 4609 * This is used to free the function mapper. The @free_func is optional 4610 * and can be used if the data needs to be freed as well. 4611 */ 4612 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, 4613 ftrace_mapper_func free_func) 4614 { 4615 struct ftrace_func_entry *entry; 4616 struct ftrace_func_map *map; 4617 struct hlist_head *hhd; 4618 int size, i; 4619 4620 if (!mapper) 4621 return; 4622 4623 if (free_func && mapper->hash.count) { 4624 size = 1 << mapper->hash.size_bits; 4625 for (i = 0; i < size; i++) { 4626 hhd = &mapper->hash.buckets[i]; 4627 hlist_for_each_entry(entry, hhd, hlist) { 4628 map = (struct ftrace_func_map *)entry; 4629 free_func(map); 4630 } 4631 } 4632 } 4633 free_ftrace_hash(&mapper->hash); 4634 } 4635 4636 static void release_probe(struct ftrace_func_probe *probe) 4637 { 4638 struct ftrace_probe_ops *probe_ops; 4639 4640 mutex_lock(&ftrace_lock); 4641 4642 WARN_ON(probe->ref <= 0); 4643 4644 /* Subtract the ref that was used to protect this instance */ 4645 probe->ref--; 4646 4647 if (!probe->ref) { 4648 probe_ops = probe->probe_ops; 4649 /* 4650 * Sending zero as ip tells probe_ops to free 4651 * the probe->data itself 4652 */ 4653 if (probe_ops->free) 4654 probe_ops->free(probe_ops, probe->tr, 0, probe->data); 4655 list_del(&probe->list); 4656 kfree(probe); 4657 } 4658 mutex_unlock(&ftrace_lock); 4659 } 4660 4661 static void acquire_probe_locked(struct ftrace_func_probe *probe) 4662 { 4663 /* 4664 * Add one ref to keep it from being freed when releasing the 4665 * ftrace_lock mutex. 4666 */ 4667 probe->ref++; 4668 } 4669 4670 int 4671 register_ftrace_function_probe(char *glob, struct trace_array *tr, 4672 struct ftrace_probe_ops *probe_ops, 4673 void *data) 4674 { 4675 struct ftrace_func_probe *probe = NULL, *iter; 4676 struct ftrace_func_entry *entry; 4677 struct ftrace_hash **orig_hash; 4678 struct ftrace_hash *old_hash; 4679 struct ftrace_hash *hash; 4680 int count = 0; 4681 int size; 4682 int ret; 4683 int i; 4684 4685 if (WARN_ON(!tr)) 4686 return -EINVAL; 4687 4688 /* We do not support '!' 
for function probes */ 4689 if (WARN_ON(glob[0] == '!')) 4690 return -EINVAL; 4691 4692 4693 mutex_lock(&ftrace_lock); 4694 /* Check if the probe_ops is already registered */ 4695 list_for_each_entry(iter, &tr->func_probes, list) { 4696 if (iter->probe_ops == probe_ops) { 4697 probe = iter; 4698 break; 4699 } 4700 } 4701 if (!probe) { 4702 probe = kzalloc(sizeof(*probe), GFP_KERNEL); 4703 if (!probe) { 4704 mutex_unlock(&ftrace_lock); 4705 return -ENOMEM; 4706 } 4707 probe->probe_ops = probe_ops; 4708 probe->ops.func = function_trace_probe_call; 4709 probe->tr = tr; 4710 ftrace_ops_init(&probe->ops); 4711 list_add(&probe->list, &tr->func_probes); 4712 } 4713 4714 acquire_probe_locked(probe); 4715 4716 mutex_unlock(&ftrace_lock); 4717 4718 /* 4719 * Note, there's a small window here that the func_hash->filter_hash 4720 * may be NULL or empty. Need to be careful when reading the loop. 4721 */ 4722 mutex_lock(&probe->ops.func_hash->regex_lock); 4723 4724 orig_hash = &probe->ops.func_hash->filter_hash; 4725 old_hash = *orig_hash; 4726 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4727 4728 if (!hash) { 4729 ret = -ENOMEM; 4730 goto out; 4731 } 4732 4733 ret = ftrace_match_records(hash, glob, strlen(glob)); 4734 4735 /* Nothing found? */ 4736 if (!ret) 4737 ret = -EINVAL; 4738 4739 if (ret < 0) 4740 goto out; 4741 4742 size = 1 << hash->size_bits; 4743 for (i = 0; i < size; i++) { 4744 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 4745 if (ftrace_lookup_ip(old_hash, entry->ip)) 4746 continue; 4747 /* 4748 * The caller might want to do something special 4749 * for each function we find. We call the callback 4750 * to give the caller an opportunity to do so. 4751 */ 4752 if (probe_ops->init) { 4753 ret = probe_ops->init(probe_ops, tr, 4754 entry->ip, data, 4755 &probe->data); 4756 if (ret < 0) { 4757 if (probe_ops->free && count) 4758 probe_ops->free(probe_ops, tr, 4759 0, probe->data); 4760 probe->data = NULL; 4761 goto out; 4762 } 4763 } 4764 count++; 4765 } 4766 } 4767 4768 mutex_lock(&ftrace_lock); 4769 4770 if (!count) { 4771 /* Nothing was added? 
*/ 4772 ret = -EINVAL; 4773 goto out_unlock; 4774 } 4775 4776 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, 4777 hash, 1); 4778 if (ret < 0) 4779 goto err_unlock; 4780 4781 /* One ref for each new function traced */ 4782 probe->ref += count; 4783 4784 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED)) 4785 ret = ftrace_startup(&probe->ops, 0); 4786 4787 out_unlock: 4788 mutex_unlock(&ftrace_lock); 4789 4790 if (!ret) 4791 ret = count; 4792 out: 4793 mutex_unlock(&probe->ops.func_hash->regex_lock); 4794 free_ftrace_hash(hash); 4795 4796 release_probe(probe); 4797 4798 return ret; 4799 4800 err_unlock: 4801 if (!probe_ops->free || !count) 4802 goto out_unlock; 4803 4804 /* Failed to do the move, need to call the free functions */ 4805 for (i = 0; i < size; i++) { 4806 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 4807 if (ftrace_lookup_ip(old_hash, entry->ip)) 4808 continue; 4809 probe_ops->free(probe_ops, tr, entry->ip, probe->data); 4810 } 4811 } 4812 goto out_unlock; 4813 } 4814 4815 int 4816 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, 4817 struct ftrace_probe_ops *probe_ops) 4818 { 4819 struct ftrace_func_probe *probe = NULL, *iter; 4820 struct ftrace_ops_hash old_hash_ops; 4821 struct ftrace_func_entry *entry; 4822 struct ftrace_glob func_g; 4823 struct ftrace_hash **orig_hash; 4824 struct ftrace_hash *old_hash; 4825 struct ftrace_hash *hash = NULL; 4826 struct hlist_node *tmp; 4827 struct hlist_head hhd; 4828 char str[KSYM_SYMBOL_LEN]; 4829 int count = 0; 4830 int i, ret = -ENODEV; 4831 int size; 4832 4833 if (!glob || !strlen(glob) || !strcmp(glob, "*")) 4834 func_g.search = NULL; 4835 else { 4836 int not; 4837 4838 func_g.type = filter_parse_regex(glob, strlen(glob), 4839 &func_g.search, ¬); 4840 func_g.len = strlen(func_g.search); 4841 4842 /* we do not support '!' for function probes */ 4843 if (WARN_ON(not)) 4844 return -EINVAL; 4845 } 4846 4847 mutex_lock(&ftrace_lock); 4848 /* Check if the probe_ops is already registered */ 4849 list_for_each_entry(iter, &tr->func_probes, list) { 4850 if (iter->probe_ops == probe_ops) { 4851 probe = iter; 4852 break; 4853 } 4854 } 4855 if (!probe) 4856 goto err_unlock_ftrace; 4857 4858 ret = -EINVAL; 4859 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED)) 4860 goto err_unlock_ftrace; 4861 4862 acquire_probe_locked(probe); 4863 4864 mutex_unlock(&ftrace_lock); 4865 4866 mutex_lock(&probe->ops.func_hash->regex_lock); 4867 4868 orig_hash = &probe->ops.func_hash->filter_hash; 4869 old_hash = *orig_hash; 4870 4871 if (ftrace_hash_empty(old_hash)) 4872 goto out_unlock; 4873 4874 old_hash_ops.filter_hash = old_hash; 4875 /* Probes only have filters */ 4876 old_hash_ops.notrace_hash = NULL; 4877 4878 ret = -ENOMEM; 4879 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4880 if (!hash) 4881 goto out_unlock; 4882 4883 INIT_HLIST_HEAD(&hhd); 4884 4885 size = 1 << hash->size_bits; 4886 for (i = 0; i < size; i++) { 4887 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { 4888 4889 if (func_g.search) { 4890 kallsyms_lookup(entry->ip, NULL, NULL, 4891 NULL, str); 4892 if (!ftrace_match(str, &func_g)) 4893 continue; 4894 } 4895 count++; 4896 remove_hash_entry(hash, entry); 4897 hlist_add_head(&entry->hlist, &hhd); 4898 } 4899 } 4900 4901 /* Nothing found? 
*/ 4902 if (!count) { 4903 ret = -EINVAL; 4904 goto out_unlock; 4905 } 4906 4907 mutex_lock(&ftrace_lock); 4908 4909 WARN_ON(probe->ref < count); 4910 4911 probe->ref -= count; 4912 4913 if (ftrace_hash_empty(hash)) 4914 ftrace_shutdown(&probe->ops, 0); 4915 4916 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, 4917 hash, 1); 4918 4919 /* still need to update the function call sites */ 4920 if (ftrace_enabled && !ftrace_hash_empty(hash)) 4921 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, 4922 &old_hash_ops); 4923 synchronize_rcu(); 4924 4925 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { 4926 hlist_del(&entry->hlist); 4927 if (probe_ops->free) 4928 probe_ops->free(probe_ops, tr, entry->ip, probe->data); 4929 kfree(entry); 4930 } 4931 mutex_unlock(&ftrace_lock); 4932 4933 out_unlock: 4934 mutex_unlock(&probe->ops.func_hash->regex_lock); 4935 free_ftrace_hash(hash); 4936 4937 release_probe(probe); 4938 4939 return ret; 4940 4941 err_unlock_ftrace: 4942 mutex_unlock(&ftrace_lock); 4943 return ret; 4944 } 4945 4946 void clear_ftrace_function_probes(struct trace_array *tr) 4947 { 4948 struct ftrace_func_probe *probe, *n; 4949 4950 list_for_each_entry_safe(probe, n, &tr->func_probes, list) 4951 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops); 4952 } 4953 4954 static LIST_HEAD(ftrace_commands); 4955 static DEFINE_MUTEX(ftrace_cmd_mutex); 4956 4957 /* 4958 * Currently we only register ftrace commands from __init, so mark this 4959 * __init too. 4960 */ 4961 __init int register_ftrace_command(struct ftrace_func_command *cmd) 4962 { 4963 struct ftrace_func_command *p; 4964 int ret = 0; 4965 4966 mutex_lock(&ftrace_cmd_mutex); 4967 list_for_each_entry(p, &ftrace_commands, list) { 4968 if (strcmp(cmd->name, p->name) == 0) { 4969 ret = -EBUSY; 4970 goto out_unlock; 4971 } 4972 } 4973 list_add(&cmd->list, &ftrace_commands); 4974 out_unlock: 4975 mutex_unlock(&ftrace_cmd_mutex); 4976 4977 return ret; 4978 } 4979 4980 /* 4981 * Currently we only unregister ftrace commands from __init, so mark 4982 * this __init too. 
4983 */ 4984 __init int unregister_ftrace_command(struct ftrace_func_command *cmd) 4985 { 4986 struct ftrace_func_command *p, *n; 4987 int ret = -ENODEV; 4988 4989 mutex_lock(&ftrace_cmd_mutex); 4990 list_for_each_entry_safe(p, n, &ftrace_commands, list) { 4991 if (strcmp(cmd->name, p->name) == 0) { 4992 ret = 0; 4993 list_del_init(&p->list); 4994 goto out_unlock; 4995 } 4996 } 4997 out_unlock: 4998 mutex_unlock(&ftrace_cmd_mutex); 4999 5000 return ret; 5001 } 5002 5003 static int ftrace_process_regex(struct ftrace_iterator *iter, 5004 char *buff, int len, int enable) 5005 { 5006 struct ftrace_hash *hash = iter->hash; 5007 struct trace_array *tr = iter->ops->private; 5008 char *func, *command, *next = buff; 5009 struct ftrace_func_command *p; 5010 int ret = -EINVAL; 5011 5012 func = strsep(&next, ":"); 5013 5014 if (!next) { 5015 ret = ftrace_match_records(hash, func, len); 5016 if (!ret) 5017 ret = -EINVAL; 5018 if (ret < 0) 5019 return ret; 5020 return 0; 5021 } 5022 5023 /* command found */ 5024 5025 command = strsep(&next, ":"); 5026 5027 mutex_lock(&ftrace_cmd_mutex); 5028 list_for_each_entry(p, &ftrace_commands, list) { 5029 if (strcmp(p->name, command) == 0) { 5030 ret = p->func(tr, hash, func, command, next, enable); 5031 goto out_unlock; 5032 } 5033 } 5034 out_unlock: 5035 mutex_unlock(&ftrace_cmd_mutex); 5036 5037 return ret; 5038 } 5039 5040 static ssize_t 5041 ftrace_regex_write(struct file *file, const char __user *ubuf, 5042 size_t cnt, loff_t *ppos, int enable) 5043 { 5044 struct ftrace_iterator *iter; 5045 struct trace_parser *parser; 5046 ssize_t ret, read; 5047 5048 if (!cnt) 5049 return 0; 5050 5051 if (file->f_mode & FMODE_READ) { 5052 struct seq_file *m = file->private_data; 5053 iter = m->private; 5054 } else 5055 iter = file->private_data; 5056 5057 if (unlikely(ftrace_disabled)) 5058 return -ENODEV; 5059 5060 /* iter->hash is a local copy, so we don't need regex_lock */ 5061 5062 parser = &iter->parser; 5063 read = trace_get_user(parser, ubuf, cnt, ppos); 5064 5065 if (read >= 0 && trace_parser_loaded(parser) && 5066 !trace_parser_cont(parser)) { 5067 ret = ftrace_process_regex(iter, parser->buffer, 5068 parser->idx, enable); 5069 trace_parser_clear(parser); 5070 if (ret < 0) 5071 goto out; 5072 } 5073 5074 ret = read; 5075 out: 5076 return ret; 5077 } 5078 5079 ssize_t 5080 ftrace_filter_write(struct file *file, const char __user *ubuf, 5081 size_t cnt, loff_t *ppos) 5082 { 5083 return ftrace_regex_write(file, ubuf, cnt, ppos, 1); 5084 } 5085 5086 ssize_t 5087 ftrace_notrace_write(struct file *file, const char __user *ubuf, 5088 size_t cnt, loff_t *ppos) 5089 { 5090 return ftrace_regex_write(file, ubuf, cnt, ppos, 0); 5091 } 5092 5093 static int 5094 __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) 5095 { 5096 struct ftrace_func_entry *entry; 5097 5098 ip = ftrace_location(ip); 5099 if (!ip) 5100 return -EINVAL; 5101 5102 if (remove) { 5103 entry = ftrace_lookup_ip(hash, ip); 5104 if (!entry) 5105 return -ENOENT; 5106 free_hash_entry(hash, entry); 5107 return 0; 5108 } 5109 5110 return add_hash_entry(hash, ip); 5111 } 5112 5113 static int 5114 ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips, 5115 unsigned int cnt, int remove) 5116 { 5117 unsigned int i; 5118 int err; 5119 5120 for (i = 0; i < cnt; i++) { 5121 err = __ftrace_match_addr(hash, ips[i], remove); 5122 if (err) { 5123 /* 5124 * This expects the @hash is a temporary hash and if this 5125 * fails the caller must free the @hash. 
5126 */ 5127 return err; 5128 } 5129 } 5130 return 0; 5131 } 5132 5133 static int 5134 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, 5135 unsigned long *ips, unsigned int cnt, 5136 int remove, int reset, int enable) 5137 { 5138 struct ftrace_hash **orig_hash; 5139 struct ftrace_hash *hash; 5140 int ret; 5141 5142 if (unlikely(ftrace_disabled)) 5143 return -ENODEV; 5144 5145 mutex_lock(&ops->func_hash->regex_lock); 5146 5147 if (enable) 5148 orig_hash = &ops->func_hash->filter_hash; 5149 else 5150 orig_hash = &ops->func_hash->notrace_hash; 5151 5152 if (reset) 5153 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 5154 else 5155 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 5156 5157 if (!hash) { 5158 ret = -ENOMEM; 5159 goto out_regex_unlock; 5160 } 5161 5162 if (buf && !ftrace_match_records(hash, buf, len)) { 5163 ret = -EINVAL; 5164 goto out_regex_unlock; 5165 } 5166 if (ips) { 5167 ret = ftrace_match_addr(hash, ips, cnt, remove); 5168 if (ret < 0) 5169 goto out_regex_unlock; 5170 } 5171 5172 mutex_lock(&ftrace_lock); 5173 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); 5174 mutex_unlock(&ftrace_lock); 5175 5176 out_regex_unlock: 5177 mutex_unlock(&ops->func_hash->regex_lock); 5178 5179 free_ftrace_hash(hash); 5180 return ret; 5181 } 5182 5183 static int 5184 ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt, 5185 int remove, int reset, int enable) 5186 { 5187 return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable); 5188 } 5189 5190 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 5191 5192 struct ftrace_direct_func { 5193 struct list_head next; 5194 unsigned long addr; 5195 int count; 5196 }; 5197 5198 static LIST_HEAD(ftrace_direct_funcs); 5199 5200 /** 5201 * ftrace_find_direct_func - test an address if it is a registered direct caller 5202 * @addr: The address of a registered direct caller 5203 * 5204 * This searches to see if a ftrace direct caller has been registered 5205 * at a specific address, and if so, it returns a descriptor for it. 5206 * 5207 * This can be used by architecture code to see if an address is 5208 * a direct caller (trampoline) attached to a fentry/mcount location. 5209 * This is useful for the function_graph tracer, as it may need to 5210 * do adjustments if it traced a location that also has a direct 5211 * trampoline attached to it. 
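 *
 * A lookup sketch (addr is an illustrative variable holding the
 * address being checked):
 *
 *	struct ftrace_direct_func *direct;
 *
 *	direct = ftrace_find_direct_func(addr);
 *	if (direct)
 *		... addr is a registered direct trampoline ...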
5212 */ 5213 struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr) 5214 { 5215 struct ftrace_direct_func *entry; 5216 bool found = false; 5217 5218 /* May be called by fgraph trampoline (protected by rcu tasks) */ 5219 list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) { 5220 if (entry->addr == addr) { 5221 found = true; 5222 break; 5223 } 5224 } 5225 if (found) 5226 return entry; 5227 5228 return NULL; 5229 } 5230 5231 static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr) 5232 { 5233 struct ftrace_direct_func *direct; 5234 5235 direct = kmalloc(sizeof(*direct), GFP_KERNEL); 5236 if (!direct) 5237 return NULL; 5238 direct->addr = addr; 5239 direct->count = 0; 5240 list_add_rcu(&direct->next, &ftrace_direct_funcs); 5241 ftrace_direct_func_count++; 5242 return direct; 5243 } 5244 5245 static int register_ftrace_function_nolock(struct ftrace_ops *ops); 5246 5247 /** 5248 * register_ftrace_direct - Call a custom trampoline directly 5249 * @ip: The address of the nop at the beginning of a function 5250 * @addr: The address of the trampoline to call at @ip 5251 * 5252 * This is used to connect a direct call from the nop location (@ip) 5253 * at the start of ftrace traced functions. The location that it calls 5254 * (@addr) must be able to handle a direct call, and save the parameters 5255 * of the function being traced, and restore them (or inject new ones 5256 * if needed), before returning. 5257 * 5258 * Returns: 5259 * 0 on success 5260 * -EBUSY - Another direct function is already attached (there can be only one) 5261 * -ENODEV - @ip does not point to a ftrace nop location (or not supported) 5262 * -ENOMEM - There was an allocation failure. 5263 */ 5264 int register_ftrace_direct(unsigned long ip, unsigned long addr) 5265 { 5266 struct ftrace_direct_func *direct; 5267 struct ftrace_func_entry *entry; 5268 struct ftrace_hash *free_hash = NULL; 5269 struct dyn_ftrace *rec; 5270 int ret = -ENODEV; 5271 5272 mutex_lock(&direct_mutex); 5273 5274 ip = ftrace_location(ip); 5275 if (!ip) 5276 goto out_unlock; 5277 5278 /* See if there's a direct function at @ip already */ 5279 ret = -EBUSY; 5280 if (ftrace_find_rec_direct(ip)) 5281 goto out_unlock; 5282 5283 ret = -ENODEV; 5284 rec = lookup_rec(ip, ip); 5285 if (!rec) 5286 goto out_unlock; 5287 5288 /* 5289 * Check if the rec says it has a direct call but we didn't 5290 * find one earlier? 5291 */ 5292 if (WARN_ON(rec->flags & FTRACE_FL_DIRECT)) 5293 goto out_unlock; 5294 5295 /* Make sure the ip points to the exact record */ 5296 if (ip != rec->ip) { 5297 ip = rec->ip; 5298 /* Need to check this ip for a direct. 
*/ 5299 if (ftrace_find_rec_direct(ip)) 5300 goto out_unlock; 5301 } 5302 5303 ret = -ENOMEM; 5304 direct = ftrace_find_direct_func(addr); 5305 if (!direct) { 5306 direct = ftrace_alloc_direct_func(addr); 5307 if (!direct) 5308 goto out_unlock; 5309 } 5310 5311 entry = ftrace_add_rec_direct(ip, addr, &free_hash); 5312 if (!entry) 5313 goto out_unlock; 5314 5315 ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0); 5316 5317 if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) { 5318 ret = register_ftrace_function_nolock(&direct_ops); 5319 if (ret) 5320 ftrace_set_filter_ip(&direct_ops, ip, 1, 0); 5321 } 5322 5323 if (ret) { 5324 remove_hash_entry(direct_functions, entry); 5325 kfree(entry); 5326 if (!direct->count) { 5327 list_del_rcu(&direct->next); 5328 synchronize_rcu_tasks(); 5329 kfree(direct); 5330 if (free_hash) 5331 free_ftrace_hash(free_hash); 5332 free_hash = NULL; 5333 ftrace_direct_func_count--; 5334 } 5335 } else { 5336 direct->count++; 5337 } 5338 out_unlock: 5339 mutex_unlock(&direct_mutex); 5340 5341 if (free_hash) { 5342 synchronize_rcu_tasks(); 5343 free_ftrace_hash(free_hash); 5344 } 5345 5346 return ret; 5347 } 5348 EXPORT_SYMBOL_GPL(register_ftrace_direct); 5349 5350 static struct ftrace_func_entry *find_direct_entry(unsigned long *ip, 5351 struct dyn_ftrace **recp) 5352 { 5353 struct ftrace_func_entry *entry; 5354 struct dyn_ftrace *rec; 5355 5356 rec = lookup_rec(*ip, *ip); 5357 if (!rec) 5358 return NULL; 5359 5360 entry = __ftrace_lookup_ip(direct_functions, rec->ip); 5361 if (!entry) { 5362 WARN_ON(rec->flags & FTRACE_FL_DIRECT); 5363 return NULL; 5364 } 5365 5366 WARN_ON(!(rec->flags & FTRACE_FL_DIRECT)); 5367 5368 /* Passed in ip just needs to be on the call site */ 5369 *ip = rec->ip; 5370 5371 if (recp) 5372 *recp = rec; 5373 5374 return entry; 5375 } 5376 5377 int unregister_ftrace_direct(unsigned long ip, unsigned long addr) 5378 { 5379 struct ftrace_direct_func *direct; 5380 struct ftrace_func_entry *entry; 5381 struct ftrace_hash *hash; 5382 int ret = -ENODEV; 5383 5384 mutex_lock(&direct_mutex); 5385 5386 ip = ftrace_location(ip); 5387 if (!ip) 5388 goto out_unlock; 5389 5390 entry = find_direct_entry(&ip, NULL); 5391 if (!entry) 5392 goto out_unlock; 5393 5394 hash = direct_ops.func_hash->filter_hash; 5395 if (hash->count == 1) 5396 unregister_ftrace_function(&direct_ops); 5397 5398 ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0); 5399 5400 WARN_ON(ret); 5401 5402 remove_hash_entry(direct_functions, entry); 5403 5404 direct = ftrace_find_direct_func(addr); 5405 if (!WARN_ON(!direct)) { 5406 /* This is the good path (see the ! 
before WARN) */
5407 		direct->count--;
5408 		WARN_ON(direct->count < 0);
5409 		if (!direct->count) {
5410 			list_del_rcu(&direct->next);
5411 			synchronize_rcu_tasks();
5412 			kfree(direct);
5413 			kfree(entry);
5414 			ftrace_direct_func_count--;
5415 		}
5416 	}
5417  out_unlock:
5418 	mutex_unlock(&direct_mutex);
5419 
5420 	return ret;
5421 }
5422 EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
5423 
5424 static struct ftrace_ops stub_ops = {
5425 	.func		= ftrace_stub,
5426 };
5427 
5428 /**
5429  * ftrace_modify_direct_caller - modify ftrace nop directly
5430  * @entry: The ftrace hash entry of the direct helper for @rec
5431  * @rec: The record representing the function site to patch
5432  * @old_addr: The location that the site at @rec->ip currently calls
5433  * @new_addr: The location that the site at @rec->ip should call
5434  *
5435  * An architecture may override this function to optimize the
5436  * changing of the direct callback on an ftrace nop location.
5437  * This is called with the ftrace_lock mutex held, and no other
5438  * ftrace callbacks are on the associated record (@rec). Thus,
5439  * it is safe to modify the ftrace record, where it should be
5440  * currently calling @old_addr directly, to call @new_addr.
5441  *
5442  * This is called with direct_mutex locked.
5443  *
5444  * Safety checks should be made to make sure that the code at
5445  * @rec->ip is currently calling @old_addr, and this must
5446  * also update entry->direct to @new_addr.
5447  */
5448 int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
5449 				       struct dyn_ftrace *rec,
5450 				       unsigned long old_addr,
5451 				       unsigned long new_addr)
5452 {
5453 	unsigned long ip = rec->ip;
5454 	int ret;
5455 
5456 	lockdep_assert_held(&direct_mutex);
5457 
5458 	/*
5459 	 * The ftrace_lock was used to determine if the record
5460 	 * had more than one registered user to it. If it did,
5461 	 * we needed to prevent that from changing to do the quick
5462 	 * switch. But if it did not (only a direct caller was attached),
5463 	 * then this function is called. This function can deal with
5464 	 * other callers attached to the rec that we care about, and
5465 	 * since it uses standard ftrace calls that take the
5466 	 * ftrace_lock mutex, we need to release it.
5467 	 */
5468 	mutex_unlock(&ftrace_lock);
5469 
5470 	/*
5471 	 * By setting a stub function at the same address, we force
5472 	 * the code to call the iterator and the direct_ops helper.
5473 	 * This means that @ip does not call the direct call, and
5474 	 * we can simply modify it.
5475 	 */
5476 	ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
5477 	if (ret)
5478 		goto out_lock;
5479 
5480 	ret = register_ftrace_function_nolock(&stub_ops);
5481 	if (ret) {
5482 		ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5483 		goto out_lock;
5484 	}
5485 
5486 	entry->direct = new_addr;
5487 
5488 	/*
5489 	 * By removing the stub, we put back the direct call, calling
5490 	 * the @new_addr.
5491 	 */
5492 	unregister_ftrace_function(&stub_ops);
5493 	ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5494 
5495  out_lock:
5496 	mutex_lock(&ftrace_lock);
5497 
5498 	return ret;
5499 }
5500 
5501 /**
5502  * modify_ftrace_direct - Modify an existing direct call to call something else
5503  * @ip: The instruction pointer to modify
5504  * @old_addr: The address that the current @ip calls directly
5505  * @new_addr: The address that the @ip should call
5506  *
5507  * This modifies a ftrace direct caller at an instruction pointer without
5508  * having to disable it first. The direct call will switch over to the
5509  * @new_addr without missing anything.
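 *
 * A usage sketch (my_func, old_tramp and new_tramp are illustrative
 * names, not kernel symbols):
 *
 *	ret = modify_ftrace_direct((unsigned long)my_func,
 *				   (unsigned long)old_tramp,
 *				   (unsigned long)new_tramp);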
5510  *
5511  * Returns: zero on success. Non zero on error, which includes:
5512  *  -ENODEV : the @ip given has no direct caller attached
5513  *  -EINVAL : the @old_addr does not match the current direct caller
5514  */
5515 int modify_ftrace_direct(unsigned long ip,
5516 			 unsigned long old_addr, unsigned long new_addr)
5517 {
5518 	struct ftrace_direct_func *direct, *new_direct = NULL;
5519 	struct ftrace_func_entry *entry;
5520 	struct dyn_ftrace *rec;
5521 	int ret = -ENODEV;
5522 
5523 	mutex_lock(&direct_mutex);
5524 
5525 	mutex_lock(&ftrace_lock);
5526 
5527 	ip = ftrace_location(ip);
5528 	if (!ip)
5529 		goto out_unlock;
5530 
5531 	entry = find_direct_entry(&ip, &rec);
5532 	if (!entry)
5533 		goto out_unlock;
5534 
5535 	ret = -EINVAL;
5536 	if (entry->direct != old_addr)
5537 		goto out_unlock;
5538 
5539 	direct = ftrace_find_direct_func(old_addr);
5540 	if (WARN_ON(!direct))
5541 		goto out_unlock;
5542 	if (direct->count > 1) {
5543 		ret = -ENOMEM;
5544 		new_direct = ftrace_alloc_direct_func(new_addr);
5545 		if (!new_direct)
5546 			goto out_unlock;
5547 		direct->count--;
5548 		new_direct->count++;
5549 	} else {
5550 		direct->addr = new_addr;
5551 	}
5552 
5553 	/*
5554 	 * If there's no other ftrace callback on the rec->ip location,
5555 	 * then it can be changed directly by the architecture.
5556 	 * If there is another caller, then we just need to change the
5557 	 * direct caller helper to point to @new_addr.
5558 	 */
5559 	if (ftrace_rec_count(rec) == 1) {
5560 		ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
5561 	} else {
5562 		entry->direct = new_addr;
5563 		ret = 0;
5564 	}
5565 
5566 	if (unlikely(ret && new_direct)) {
5567 		direct->count++;
5568 		list_del_rcu(&new_direct->next);
5569 		synchronize_rcu_tasks();
5570 		kfree(new_direct);
5571 		ftrace_direct_func_count--;
5572 	}
5573 
5574  out_unlock:
5575 	mutex_unlock(&ftrace_lock);
5576 	mutex_unlock(&direct_mutex);
5577 	return ret;
5578 }
5579 EXPORT_SYMBOL_GPL(modify_ftrace_direct);
5580 
5581 #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
5582 
5583 static int check_direct_multi(struct ftrace_ops *ops)
5584 {
5585 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5586 		return -EINVAL;
5587 	if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
5588 		return -EINVAL;
5589 	return 0;
5590 }
5591 
5592 static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
5593 {
5594 	struct ftrace_func_entry *entry, *del;
5595 	int size, i;
5596 
5597 	size = 1 << hash->size_bits;
5598 	for (i = 0; i < size; i++) {
5599 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5600 			del = __ftrace_lookup_ip(direct_functions, entry->ip);
5601 			if (del && del->direct == addr) {
5602 				remove_hash_entry(direct_functions, del);
5603 				kfree(del);
5604 			}
5605 		}
5606 	}
5607 }
5608 
5609 /**
5610  * register_ftrace_direct_multi - Call a custom trampoline directly
5611  * for multiple functions registered in @ops
5612  * @ops: The address of the struct ftrace_ops object
5613  * @addr: The address of the trampoline to call at @ops functions
5614  *
5615  * This is used to connect direct calls to @addr from the nop locations
5616  * of the functions registered in @ops (via the ftrace_set_filter_ip()
5617  * function).
5618  *
5619  * The location that it calls (@addr) must be able to handle a direct call,
5620  * and save the parameters of the function being traced, and restore them
5621  * (or inject new ones if needed), before returning.
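 *
 * A registration sketch (my_ops, my_func and my_tramp are illustrative
 * names; my_tramp must follow the direct-call calling convention):
 *
 *	ftrace_set_filter_ip(&my_ops, (unsigned long)my_func, 0, 0);
 *	ret = register_ftrace_direct_multi(&my_ops, (unsigned long)my_tramp);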
5622  *
5623  * Returns:
5624  *  0 on success
5625  *  -EINVAL  - The @ops object was already registered with this call or
5626  *             when there are no functions in the @ops object.
5627  *  -EBUSY   - Another direct function is already attached (there can be only one)
5628  *  -ENODEV  - @ip does not point to a ftrace nop location (or not supported)
5629  *  -ENOMEM  - There was an allocation failure.
5630  */
5631 int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5632 {
5633 	struct ftrace_hash *hash, *free_hash = NULL;
5634 	struct ftrace_func_entry *entry, *new;
5635 	int err = -EBUSY, size, i;
5636 
5637 	if (ops->func || ops->trampoline)
5638 		return -EINVAL;
5639 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5640 		return -EINVAL;
5641 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
5642 		return -EINVAL;
5643 
5644 	hash = ops->func_hash->filter_hash;
5645 	if (ftrace_hash_empty(hash))
5646 		return -EINVAL;
5647 
5648 	mutex_lock(&direct_mutex);
5649 
5650 	/* Make sure requested entries are not already registered... */
5651 	size = 1 << hash->size_bits;
5652 	for (i = 0; i < size; i++) {
5653 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5654 			if (ftrace_find_rec_direct(entry->ip))
5655 				goto out_unlock;
5656 		}
5657 	}
5658 
5659 	/* ... and insert them into the direct_functions hash. */
5660 	err = -ENOMEM;
5661 	for (i = 0; i < size; i++) {
5662 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5663 			new = ftrace_add_rec_direct(entry->ip, addr, &free_hash);
5664 			if (!new)
5665 				goto out_remove;
5666 			entry->direct = addr;
5667 		}
5668 	}
5669 
5670 	ops->func = call_direct_funcs;
5671 	ops->flags = MULTI_FLAGS;
5672 	ops->trampoline = FTRACE_REGS_ADDR;
5673 
5674 	err = register_ftrace_function_nolock(ops);
5675 
5676  out_remove:
5677 	if (err)
5678 		remove_direct_functions_hash(hash, addr);
5679 
5680  out_unlock:
5681 	mutex_unlock(&direct_mutex);
5682 
5683 	if (free_hash) {
5684 		synchronize_rcu_tasks();
5685 		free_ftrace_hash(free_hash);
5686 	}
5687 	return err;
5688 }
5689 EXPORT_SYMBOL_GPL(register_ftrace_direct_multi);
5690 
5691 /**
5692  * unregister_ftrace_direct_multi - Remove calls to custom trampoline
5693  * previously registered by register_ftrace_direct_multi for @ops object.
5694  * @ops: The address of the struct ftrace_ops object
5695  *
5696  * This is used to remove direct calls to @addr from the nop locations
5697  * of the functions registered in @ops (via the ftrace_set_filter_ip()
5698  * function).
5699  *
5700  * Returns:
5701  *  0 on success
5702  *  -EINVAL - The @ops object was not properly registered.
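 *
 * Usage sketch, undoing the earlier registration (my_ops and my_tramp
 * are the same illustrative names used above):
 *
 *	err = unregister_ftrace_direct_multi(&my_ops, (unsigned long)my_tramp);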
5703  */
5704 int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5705 {
5706 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
5707 	int err;
5708 
5709 	if (check_direct_multi(ops))
5710 		return -EINVAL;
5711 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5712 		return -EINVAL;
5713 
5714 	mutex_lock(&direct_mutex);
5715 	err = unregister_ftrace_function(ops);
5716 	remove_direct_functions_hash(hash, addr);
5717 	mutex_unlock(&direct_mutex);
5718 
5719 	/* clean up for a possible later register call */
5720 	ops->func = NULL;
5721 	ops->trampoline = 0;
5722 	return err;
5723 }
5724 EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi);
5725 
5726 static int
5727 __modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5728 {
5729 	struct ftrace_hash *hash;
5730 	struct ftrace_func_entry *entry, *iter;
5731 	static struct ftrace_ops tmp_ops = {
5732 		.func		= ftrace_stub,
5733 		.flags		= FTRACE_OPS_FL_STUB,
5734 	};
5735 	int i, size;
5736 	int err;
5737 
5738 	lockdep_assert_held_once(&direct_mutex);
5739 
5740 	/* Enable the tmp_ops to have the same functions as the direct ops */
5741 	ftrace_ops_init(&tmp_ops);
5742 	tmp_ops.func_hash = ops->func_hash;
5743 
5744 	err = register_ftrace_function_nolock(&tmp_ops);
5745 	if (err)
5746 		return err;
5747 
5748 	/*
5749 	 * Now the ftrace_ops_list_func() is called to handle the direct callers.
5750 	 * We can safely change the direct functions attached to each entry.
5751 	 */
5752 	mutex_lock(&ftrace_lock);
5753 
5754 	hash = ops->func_hash->filter_hash;
5755 	size = 1 << hash->size_bits;
5756 	for (i = 0; i < size; i++) {
5757 		hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
5758 			entry = __ftrace_lookup_ip(direct_functions, iter->ip);
5759 			if (!entry)
5760 				continue;
5761 			entry->direct = addr;
5762 		}
5763 	}
5764 
5765 	mutex_unlock(&ftrace_lock);
5766 
5767 	/* Removing the tmp_ops will add the updated direct callers to the functions */
5768 	unregister_ftrace_function(&tmp_ops);
5769 
5770 	return err;
5771 }
5772 
5773 /**
5774  * modify_ftrace_direct_multi_nolock - Modify an existing direct 'multi' call
5775  * to call something else
5776  * @ops: The address of the struct ftrace_ops object
5777  * @addr: The address of the new trampoline to call at @ops functions
5778  *
5779  * This is used to unregister the currently registered direct caller and
5780  * register a new one (@addr) for the functions registered in the @ops object.
5781  *
5782  * Note there's a window between the ftrace_shutdown and ftrace_startup calls
5783  * where there will be no callbacks called.
5784  *
5785  * Caller should already have direct_mutex locked, so we don't lock
5786  * direct_mutex here.
5787  *
5788  * Returns: zero on success. Non zero on error, which includes:
5789  *  -EINVAL - The @ops object was not properly registered.
5790  */
5791 int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr)
5792 {
5793 	if (check_direct_multi(ops))
5794 		return -EINVAL;
5795 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5796 		return -EINVAL;
5797 
5798 	return __modify_ftrace_direct_multi(ops, addr);
5799 }
5800 EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi_nolock);
5801 
5802 /**
5803  * modify_ftrace_direct_multi - Modify an existing direct 'multi' call
5804  * to call something else
5805  * @ops: The address of the struct ftrace_ops object
5806  * @addr: The address of the new trampoline to call at @ops functions
5807  *
5808  * This is used to unregister the currently registered direct caller and
5809  * register a new one (@addr) for the functions registered in the @ops object.
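 *
 * Usage sketch (continuing the illustrative my_ops example from above):
 *
 *	err = modify_ftrace_direct_multi(&my_ops, (unsigned long)new_tramp);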
5810  *
5811  * Note there's a window between the ftrace_shutdown and ftrace_startup calls
5812  * where there will be no callbacks called.
5813  *
5814  * Returns: zero on success. Non zero on error, which includes:
5815  *  -EINVAL - The @ops object was not properly registered.
5816  */
5817 int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5818 {
5819 	int err;
5820 
5821 	if (check_direct_multi(ops))
5822 		return -EINVAL;
5823 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5824 		return -EINVAL;
5825 
5826 	mutex_lock(&direct_mutex);
5827 	err = __modify_ftrace_direct_multi(ops, addr);
5828 	mutex_unlock(&direct_mutex);
5829 	return err;
5830 }
5831 EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi);
5832 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
5833 
5834 /**
5835  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
5836  * @ops - the ops to set the filter with
5837  * @ip - the address to add to or remove from the filter.
5838  * @remove - non zero to remove the ip from the filter
5839  * @reset - non zero to reset all filters before applying this filter.
5840  *
5841  * Filters denote which functions should be enabled when tracing is enabled.
5842  * If @ip is NULL, it fails to update the filter.
5843  */
5844 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
5845 			 int remove, int reset)
5846 {
5847 	ftrace_ops_init(ops);
5848 	return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
5849 }
5850 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
5851 
5852 /**
5853  * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
5854  * @ops - the ops to set the filter with
5855  * @ips - the array of addresses to add to or remove from the filter.
5856  * @cnt - the number of addresses in @ips
5857  * @remove - non zero to remove ips from the filter
5858  * @reset - non zero to reset all filters before applying this filter.
5859  *
5860  * Filters denote which functions should be enabled when tracing is enabled.
5861  * If the @ips array or any ip specified within is NULL, it fails to update the filter.
5862  */
5863 int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
5864 			  unsigned int cnt, int remove, int reset)
5865 {
5866 	ftrace_ops_init(ops);
5867 	return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
5868 }
5869 EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
5870 
5871 /**
5872  * ftrace_ops_set_global_filter - setup ops to use global filters
5873  * @ops - the ops which will use the global filters
5874  *
5875  * ftrace users who need global function trace filtering should call this.
5876  * It can set the global filter only if ops were not initialized before.
5877  */
5878 void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
5879 {
5880 	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
5881 		return;
5882 
5883 	ftrace_ops_init(ops);
5884 	ops->func_hash = &global_ops.local_hash;
5885 }
5886 EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
5887 
5888 static int
5889 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
5890 		 int reset, int enable)
5891 {
5892 	return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable);
5893 }
5894 
5895 /**
5896  * ftrace_set_filter - set a function to filter on in ftrace
5897  * @ops - the ops to set the filter with
5898  * @buf - the string that holds the function filter text.
5899  * @len - the length of the string.
5900  * @reset - non zero to reset all filters before applying this filter.
5901  *
5902  * Filters denote which functions should be enabled when tracing is enabled.
5903 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 5904 */ 5905 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, 5906 int len, int reset) 5907 { 5908 ftrace_ops_init(ops); 5909 return ftrace_set_regex(ops, buf, len, reset, 1); 5910 } 5911 EXPORT_SYMBOL_GPL(ftrace_set_filter); 5912 5913 /** 5914 * ftrace_set_notrace - set a function to not trace in ftrace 5915 * @ops - the ops to set the notrace filter with 5916 * @buf - the string that holds the function notrace text. 5917 * @len - the length of the string. 5918 * @reset - non zero to reset all filters before applying this filter. 5919 * 5920 * Notrace Filters denote which functions should not be enabled when tracing 5921 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 5922 * for tracing. 5923 */ 5924 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, 5925 int len, int reset) 5926 { 5927 ftrace_ops_init(ops); 5928 return ftrace_set_regex(ops, buf, len, reset, 0); 5929 } 5930 EXPORT_SYMBOL_GPL(ftrace_set_notrace); 5931 /** 5932 * ftrace_set_global_filter - set a function to filter on with global tracers 5933 * @buf - the string that holds the function filter text. 5934 * @len - the length of the string. 5935 * @reset - non zero to reset all filters before applying this filter. 5936 * 5937 * Filters denote which functions should be enabled when tracing is enabled. 5938 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 5939 */ 5940 void ftrace_set_global_filter(unsigned char *buf, int len, int reset) 5941 { 5942 ftrace_set_regex(&global_ops, buf, len, reset, 1); 5943 } 5944 EXPORT_SYMBOL_GPL(ftrace_set_global_filter); 5945 5946 /** 5947 * ftrace_set_global_notrace - set a function to not trace with global tracers 5948 * @buf - the string that holds the function notrace text. 5949 * @len - the length of the string. 5950 * @reset - non zero to reset all filters before applying this filter. 5951 * 5952 * Notrace Filters denote which functions should not be enabled when tracing 5953 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 5954 * for tracing. 5955 */ 5956 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) 5957 { 5958 ftrace_set_regex(&global_ops, buf, len, reset, 0); 5959 } 5960 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); 5961 5962 /* 5963 * command line interface to allow users to set filters on boot up. 
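 *
 * For example, on the kernel command line (the patterns are
 * illustrative):
 *
 *	ftrace_filter=kmem_cache_*
 *	ftrace_notrace=*spin_lock*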
5964 */ 5965 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE 5966 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 5967 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; 5968 5969 /* Used by function selftest to not test if filter is set */ 5970 bool ftrace_filter_param __initdata; 5971 5972 static int __init set_ftrace_notrace(char *str) 5973 { 5974 ftrace_filter_param = true; 5975 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); 5976 return 1; 5977 } 5978 __setup("ftrace_notrace=", set_ftrace_notrace); 5979 5980 static int __init set_ftrace_filter(char *str) 5981 { 5982 ftrace_filter_param = true; 5983 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); 5984 return 1; 5985 } 5986 __setup("ftrace_filter=", set_ftrace_filter); 5987 5988 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 5989 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; 5990 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 5991 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); 5992 5993 static int __init set_graph_function(char *str) 5994 { 5995 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); 5996 return 1; 5997 } 5998 __setup("ftrace_graph_filter=", set_graph_function); 5999 6000 static int __init set_graph_notrace_function(char *str) 6001 { 6002 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE); 6003 return 1; 6004 } 6005 __setup("ftrace_graph_notrace=", set_graph_notrace_function); 6006 6007 static int __init set_graph_max_depth_function(char *str) 6008 { 6009 if (!str) 6010 return 0; 6011 fgraph_max_depth = simple_strtoul(str, NULL, 0); 6012 return 1; 6013 } 6014 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function); 6015 6016 static void __init set_ftrace_early_graph(char *buf, int enable) 6017 { 6018 int ret; 6019 char *func; 6020 struct ftrace_hash *hash; 6021 6022 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 6023 if (MEM_FAIL(!hash, "Failed to allocate hash\n")) 6024 return; 6025 6026 while (buf) { 6027 func = strsep(&buf, ","); 6028 /* we allow only one expression at a time */ 6029 ret = ftrace_graph_set_hash(hash, func); 6030 if (ret) 6031 printk(KERN_DEBUG "ftrace: function %s not " 6032 "traceable\n", func); 6033 } 6034 6035 if (enable) 6036 ftrace_graph_hash = hash; 6037 else 6038 ftrace_graph_notrace_hash = hash; 6039 } 6040 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 6041 6042 void __init 6043 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable) 6044 { 6045 char *func; 6046 6047 ftrace_ops_init(ops); 6048 6049 while (buf) { 6050 func = strsep(&buf, ","); 6051 ftrace_set_regex(ops, func, strlen(func), 0, enable); 6052 } 6053 } 6054 6055 static void __init set_ftrace_early_filters(void) 6056 { 6057 if (ftrace_filter_buf[0]) 6058 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1); 6059 if (ftrace_notrace_buf[0]) 6060 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0); 6061 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 6062 if (ftrace_graph_buf[0]) 6063 set_ftrace_early_graph(ftrace_graph_buf, 1); 6064 if (ftrace_graph_notrace_buf[0]) 6065 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0); 6066 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 6067 } 6068 6069 int ftrace_regex_release(struct inode *inode, struct file *file) 6070 { 6071 struct seq_file *m = (struct seq_file *)file->private_data; 6072 struct ftrace_iterator *iter; 6073 struct ftrace_hash **orig_hash; 6074 struct trace_parser *parser; 6075 int filter_hash; 6076 6077 if (file->f_mode & FMODE_READ) { 6078 iter = m->private; 6079 
seq_release(inode, file); 6080 } else 6081 iter = file->private_data; 6082 6083 parser = &iter->parser; 6084 if (trace_parser_loaded(parser)) { 6085 int enable = !(iter->flags & FTRACE_ITER_NOTRACE); 6086 6087 ftrace_process_regex(iter, parser->buffer, 6088 parser->idx, enable); 6089 } 6090 6091 trace_parser_put(parser); 6092 6093 mutex_lock(&iter->ops->func_hash->regex_lock); 6094 6095 if (file->f_mode & FMODE_WRITE) { 6096 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); 6097 6098 if (filter_hash) { 6099 orig_hash = &iter->ops->func_hash->filter_hash; 6100 if (iter->tr) { 6101 if (list_empty(&iter->tr->mod_trace)) 6102 iter->hash->flags &= ~FTRACE_HASH_FL_MOD; 6103 else 6104 iter->hash->flags |= FTRACE_HASH_FL_MOD; 6105 } 6106 } else 6107 orig_hash = &iter->ops->func_hash->notrace_hash; 6108 6109 mutex_lock(&ftrace_lock); 6110 ftrace_hash_move_and_update_ops(iter->ops, orig_hash, 6111 iter->hash, filter_hash); 6112 mutex_unlock(&ftrace_lock); 6113 } else { 6114 /* For read only, the hash is the ops hash */ 6115 iter->hash = NULL; 6116 } 6117 6118 mutex_unlock(&iter->ops->func_hash->regex_lock); 6119 free_ftrace_hash(iter->hash); 6120 if (iter->tr) 6121 trace_array_put(iter->tr); 6122 kfree(iter); 6123 6124 return 0; 6125 } 6126 6127 static const struct file_operations ftrace_avail_fops = { 6128 .open = ftrace_avail_open, 6129 .read = seq_read, 6130 .llseek = seq_lseek, 6131 .release = seq_release_private, 6132 }; 6133 6134 static const struct file_operations ftrace_enabled_fops = { 6135 .open = ftrace_enabled_open, 6136 .read = seq_read, 6137 .llseek = seq_lseek, 6138 .release = seq_release_private, 6139 }; 6140 6141 static const struct file_operations ftrace_filter_fops = { 6142 .open = ftrace_filter_open, 6143 .read = seq_read, 6144 .write = ftrace_filter_write, 6145 .llseek = tracing_lseek, 6146 .release = ftrace_regex_release, 6147 }; 6148 6149 static const struct file_operations ftrace_notrace_fops = { 6150 .open = ftrace_notrace_open, 6151 .read = seq_read, 6152 .write = ftrace_notrace_write, 6153 .llseek = tracing_lseek, 6154 .release = ftrace_regex_release, 6155 }; 6156 6157 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 6158 6159 static DEFINE_MUTEX(graph_lock); 6160 6161 struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH; 6162 struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH; 6163 6164 enum graph_filter_type { 6165 GRAPH_FILTER_NOTRACE = 0, 6166 GRAPH_FILTER_FUNCTION, 6167 }; 6168 6169 #define FTRACE_GRAPH_EMPTY ((void *)1) 6170 6171 struct ftrace_graph_data { 6172 struct ftrace_hash *hash; 6173 struct ftrace_func_entry *entry; 6174 int idx; /* for hash table iteration */ 6175 enum graph_filter_type type; 6176 struct ftrace_hash *new_hash; 6177 const struct seq_operations *seq_ops; 6178 struct trace_parser parser; 6179 }; 6180 6181 static void * 6182 __g_next(struct seq_file *m, loff_t *pos) 6183 { 6184 struct ftrace_graph_data *fgd = m->private; 6185 struct ftrace_func_entry *entry = fgd->entry; 6186 struct hlist_head *head; 6187 int i, idx = fgd->idx; 6188 6189 if (*pos >= fgd->hash->count) 6190 return NULL; 6191 6192 if (entry) { 6193 hlist_for_each_entry_continue(entry, hlist) { 6194 fgd->entry = entry; 6195 return entry; 6196 } 6197 6198 idx++; 6199 } 6200 6201 for (i = idx; i < 1 << fgd->hash->size_bits; i++) { 6202 head = &fgd->hash->buckets[i]; 6203 hlist_for_each_entry(entry, head, hlist) { 6204 fgd->entry = entry; 6205 fgd->idx = i; 6206 return entry; 6207 } 6208 } 6209 return NULL; 6210 } 6211 6212 static void * 6213 g_next(struct seq_file *m, void *v, 
loff_t *pos) 6214 { 6215 (*pos)++; 6216 return __g_next(m, pos); 6217 } 6218 6219 static void *g_start(struct seq_file *m, loff_t *pos) 6220 { 6221 struct ftrace_graph_data *fgd = m->private; 6222 6223 mutex_lock(&graph_lock); 6224 6225 if (fgd->type == GRAPH_FILTER_FUNCTION) 6226 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, 6227 lockdep_is_held(&graph_lock)); 6228 else 6229 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 6230 lockdep_is_held(&graph_lock)); 6231 6232 /* Nothing, tell g_show to print all functions are enabled */ 6233 if (ftrace_hash_empty(fgd->hash) && !*pos) 6234 return FTRACE_GRAPH_EMPTY; 6235 6236 fgd->idx = 0; 6237 fgd->entry = NULL; 6238 return __g_next(m, pos); 6239 } 6240 6241 static void g_stop(struct seq_file *m, void *p) 6242 { 6243 mutex_unlock(&graph_lock); 6244 } 6245 6246 static int g_show(struct seq_file *m, void *v) 6247 { 6248 struct ftrace_func_entry *entry = v; 6249 6250 if (!entry) 6251 return 0; 6252 6253 if (entry == FTRACE_GRAPH_EMPTY) { 6254 struct ftrace_graph_data *fgd = m->private; 6255 6256 if (fgd->type == GRAPH_FILTER_FUNCTION) 6257 seq_puts(m, "#### all functions enabled ####\n"); 6258 else 6259 seq_puts(m, "#### no functions disabled ####\n"); 6260 return 0; 6261 } 6262 6263 seq_printf(m, "%ps\n", (void *)entry->ip); 6264 6265 return 0; 6266 } 6267 6268 static const struct seq_operations ftrace_graph_seq_ops = { 6269 .start = g_start, 6270 .next = g_next, 6271 .stop = g_stop, 6272 .show = g_show, 6273 }; 6274 6275 static int 6276 __ftrace_graph_open(struct inode *inode, struct file *file, 6277 struct ftrace_graph_data *fgd) 6278 { 6279 int ret; 6280 struct ftrace_hash *new_hash = NULL; 6281 6282 ret = security_locked_down(LOCKDOWN_TRACEFS); 6283 if (ret) 6284 return ret; 6285 6286 if (file->f_mode & FMODE_WRITE) { 6287 const int size_bits = FTRACE_HASH_DEFAULT_BITS; 6288 6289 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX)) 6290 return -ENOMEM; 6291 6292 if (file->f_flags & O_TRUNC) 6293 new_hash = alloc_ftrace_hash(size_bits); 6294 else 6295 new_hash = alloc_and_copy_ftrace_hash(size_bits, 6296 fgd->hash); 6297 if (!new_hash) { 6298 ret = -ENOMEM; 6299 goto out; 6300 } 6301 } 6302 6303 if (file->f_mode & FMODE_READ) { 6304 ret = seq_open(file, &ftrace_graph_seq_ops); 6305 if (!ret) { 6306 struct seq_file *m = file->private_data; 6307 m->private = fgd; 6308 } else { 6309 /* Failed */ 6310 free_ftrace_hash(new_hash); 6311 new_hash = NULL; 6312 } 6313 } else 6314 file->private_data = fgd; 6315 6316 out: 6317 if (ret < 0 && file->f_mode & FMODE_WRITE) 6318 trace_parser_put(&fgd->parser); 6319 6320 fgd->new_hash = new_hash; 6321 6322 /* 6323 * All uses of fgd->hash must be taken with the graph_lock 6324 * held. The graph_lock is going to be released, so force 6325 * fgd->hash to be reinitialized when it is taken again. 
6326 */ 6327 fgd->hash = NULL; 6328 6329 return ret; 6330 } 6331 6332 static int 6333 ftrace_graph_open(struct inode *inode, struct file *file) 6334 { 6335 struct ftrace_graph_data *fgd; 6336 int ret; 6337 6338 if (unlikely(ftrace_disabled)) 6339 return -ENODEV; 6340 6341 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 6342 if (fgd == NULL) 6343 return -ENOMEM; 6344 6345 mutex_lock(&graph_lock); 6346 6347 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, 6348 lockdep_is_held(&graph_lock)); 6349 fgd->type = GRAPH_FILTER_FUNCTION; 6350 fgd->seq_ops = &ftrace_graph_seq_ops; 6351 6352 ret = __ftrace_graph_open(inode, file, fgd); 6353 if (ret < 0) 6354 kfree(fgd); 6355 6356 mutex_unlock(&graph_lock); 6357 return ret; 6358 } 6359 6360 static int 6361 ftrace_graph_notrace_open(struct inode *inode, struct file *file) 6362 { 6363 struct ftrace_graph_data *fgd; 6364 int ret; 6365 6366 if (unlikely(ftrace_disabled)) 6367 return -ENODEV; 6368 6369 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 6370 if (fgd == NULL) 6371 return -ENOMEM; 6372 6373 mutex_lock(&graph_lock); 6374 6375 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 6376 lockdep_is_held(&graph_lock)); 6377 fgd->type = GRAPH_FILTER_NOTRACE; 6378 fgd->seq_ops = &ftrace_graph_seq_ops; 6379 6380 ret = __ftrace_graph_open(inode, file, fgd); 6381 if (ret < 0) 6382 kfree(fgd); 6383 6384 mutex_unlock(&graph_lock); 6385 return ret; 6386 } 6387 6388 static int 6389 ftrace_graph_release(struct inode *inode, struct file *file) 6390 { 6391 struct ftrace_graph_data *fgd; 6392 struct ftrace_hash *old_hash, *new_hash; 6393 struct trace_parser *parser; 6394 int ret = 0; 6395 6396 if (file->f_mode & FMODE_READ) { 6397 struct seq_file *m = file->private_data; 6398 6399 fgd = m->private; 6400 seq_release(inode, file); 6401 } else { 6402 fgd = file->private_data; 6403 } 6404 6405 6406 if (file->f_mode & FMODE_WRITE) { 6407 6408 parser = &fgd->parser; 6409 6410 if (trace_parser_loaded((parser))) { 6411 ret = ftrace_graph_set_hash(fgd->new_hash, 6412 parser->buffer); 6413 } 6414 6415 trace_parser_put(parser); 6416 6417 new_hash = __ftrace_hash_move(fgd->new_hash); 6418 if (!new_hash) { 6419 ret = -ENOMEM; 6420 goto out; 6421 } 6422 6423 mutex_lock(&graph_lock); 6424 6425 if (fgd->type == GRAPH_FILTER_FUNCTION) { 6426 old_hash = rcu_dereference_protected(ftrace_graph_hash, 6427 lockdep_is_held(&graph_lock)); 6428 rcu_assign_pointer(ftrace_graph_hash, new_hash); 6429 } else { 6430 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 6431 lockdep_is_held(&graph_lock)); 6432 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash); 6433 } 6434 6435 mutex_unlock(&graph_lock); 6436 6437 /* 6438 * We need to do a hard force of sched synchronization. 6439 * This is because we use preempt_disable() to do RCU, but 6440 * the function tracers can be called where RCU is not watching 6441 * (like before user_exit()). We can not rely on the RCU 6442 * infrastructure to do the synchronization, thus we must do it 6443 * ourselves. 
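* synchronize_rcu_tasks_rude() forces a schedule on every CPU, so
* no CPU can still be running a preempt-disabled reader of the old
* hash once it returns.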
6444 */ 6445 if (old_hash != EMPTY_HASH) 6446 synchronize_rcu_tasks_rude(); 6447 6448 free_ftrace_hash(old_hash); 6449 } 6450 6451 out: 6452 free_ftrace_hash(fgd->new_hash); 6453 kfree(fgd); 6454 6455 return ret; 6456 } 6457 6458 static int 6459 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer) 6460 { 6461 struct ftrace_glob func_g; 6462 struct dyn_ftrace *rec; 6463 struct ftrace_page *pg; 6464 struct ftrace_func_entry *entry; 6465 int fail = 1; 6466 int not; 6467 6468 /* decode regex */ 6469 func_g.type = filter_parse_regex(buffer, strlen(buffer), 6470 &func_g.search, ¬); 6471 6472 func_g.len = strlen(func_g.search); 6473 6474 mutex_lock(&ftrace_lock); 6475 6476 if (unlikely(ftrace_disabled)) { 6477 mutex_unlock(&ftrace_lock); 6478 return -ENODEV; 6479 } 6480 6481 do_for_each_ftrace_rec(pg, rec) { 6482 6483 if (rec->flags & FTRACE_FL_DISABLED) 6484 continue; 6485 6486 if (ftrace_match_record(rec, &func_g, NULL, 0)) { 6487 entry = ftrace_lookup_ip(hash, rec->ip); 6488 6489 if (!not) { 6490 fail = 0; 6491 6492 if (entry) 6493 continue; 6494 if (add_hash_entry(hash, rec->ip) < 0) 6495 goto out; 6496 } else { 6497 if (entry) { 6498 free_hash_entry(hash, entry); 6499 fail = 0; 6500 } 6501 } 6502 } 6503 } while_for_each_ftrace_rec(); 6504 out: 6505 mutex_unlock(&ftrace_lock); 6506 6507 if (fail) 6508 return -EINVAL; 6509 6510 return 0; 6511 } 6512 6513 static ssize_t 6514 ftrace_graph_write(struct file *file, const char __user *ubuf, 6515 size_t cnt, loff_t *ppos) 6516 { 6517 ssize_t read, ret = 0; 6518 struct ftrace_graph_data *fgd = file->private_data; 6519 struct trace_parser *parser; 6520 6521 if (!cnt) 6522 return 0; 6523 6524 /* Read mode uses seq functions */ 6525 if (file->f_mode & FMODE_READ) { 6526 struct seq_file *m = file->private_data; 6527 fgd = m->private; 6528 } 6529 6530 parser = &fgd->parser; 6531 6532 read = trace_get_user(parser, ubuf, cnt, ppos); 6533 6534 if (read >= 0 && trace_parser_loaded(parser) && 6535 !trace_parser_cont(parser)) { 6536 6537 ret = ftrace_graph_set_hash(fgd->new_hash, 6538 parser->buffer); 6539 trace_parser_clear(parser); 6540 } 6541 6542 if (!ret) 6543 ret = read; 6544 6545 return ret; 6546 } 6547 6548 static const struct file_operations ftrace_graph_fops = { 6549 .open = ftrace_graph_open, 6550 .read = seq_read, 6551 .write = ftrace_graph_write, 6552 .llseek = tracing_lseek, 6553 .release = ftrace_graph_release, 6554 }; 6555 6556 static const struct file_operations ftrace_graph_notrace_fops = { 6557 .open = ftrace_graph_notrace_open, 6558 .read = seq_read, 6559 .write = ftrace_graph_write, 6560 .llseek = tracing_lseek, 6561 .release = ftrace_graph_release, 6562 }; 6563 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 6564 6565 void ftrace_create_filter_files(struct ftrace_ops *ops, 6566 struct dentry *parent) 6567 { 6568 6569 trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent, 6570 ops, &ftrace_filter_fops); 6571 6572 trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent, 6573 ops, &ftrace_notrace_fops); 6574 } 6575 6576 /* 6577 * The name "destroy_filter_files" is really a misnomer. Although 6578 * in the future, it may actually delete the files, but this is 6579 * really intended to make sure the ops passed in are disabled 6580 * and that when this function returns, the caller is free to 6581 * free the ops. 6582 * 6583 * The "destroy" name is only to match the "create" name that this 6584 * should be paired with. 
6585 */ 6586 void ftrace_destroy_filter_files(struct ftrace_ops *ops) 6587 { 6588 mutex_lock(&ftrace_lock); 6589 if (ops->flags & FTRACE_OPS_FL_ENABLED) 6590 ftrace_shutdown(ops, 0); 6591 ops->flags |= FTRACE_OPS_FL_DELETED; 6592 ftrace_free_filter(ops); 6593 mutex_unlock(&ftrace_lock); 6594 } 6595 6596 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer) 6597 { 6598 6599 trace_create_file("available_filter_functions", TRACE_MODE_READ, 6600 d_tracer, NULL, &ftrace_avail_fops); 6601 6602 trace_create_file("enabled_functions", TRACE_MODE_READ, 6603 d_tracer, NULL, &ftrace_enabled_fops); 6604 6605 ftrace_create_filter_files(&global_ops, d_tracer); 6606 6607 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 6608 trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer, 6609 NULL, 6610 &ftrace_graph_fops); 6611 trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer, 6612 NULL, 6613 &ftrace_graph_notrace_fops); 6614 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 6615 6616 return 0; 6617 } 6618 6619 static int ftrace_cmp_ips(const void *a, const void *b) 6620 { 6621 const unsigned long *ipa = a; 6622 const unsigned long *ipb = b; 6623 6624 if (*ipa > *ipb) 6625 return 1; 6626 if (*ipa < *ipb) 6627 return -1; 6628 return 0; 6629 } 6630 6631 #ifdef CONFIG_FTRACE_SORT_STARTUP_TEST 6632 static void test_is_sorted(unsigned long *start, unsigned long count) 6633 { 6634 int i; 6635 6636 for (i = 1; i < count; i++) { 6637 if (WARN(start[i - 1] > start[i], 6638 "[%d] %pS at %lx is not sorted with %pS at %lx\n", i, 6639 (void *)start[i - 1], start[i - 1], 6640 (void *)start[i], start[i])) 6641 break; 6642 } 6643 if (i == count) 6644 pr_info("ftrace section at %px sorted properly\n", start); 6645 } 6646 #else 6647 static void test_is_sorted(unsigned long *start, unsigned long count) 6648 { 6649 } 6650 #endif 6651 6652 static int ftrace_process_locs(struct module *mod, 6653 unsigned long *start, 6654 unsigned long *end) 6655 { 6656 struct ftrace_page *start_pg; 6657 struct ftrace_page *pg; 6658 struct dyn_ftrace *rec; 6659 unsigned long count; 6660 unsigned long *p; 6661 unsigned long addr; 6662 unsigned long flags = 0; /* Shut up gcc */ 6663 int ret = -ENOMEM; 6664 6665 count = end - start; 6666 6667 if (!count) 6668 return 0; 6669 6670 /* 6671 * Sorting mcount in vmlinux at build time depend on 6672 * CONFIG_BUILDTIME_MCOUNT_SORT, while mcount loc in 6673 * modules can not be sorted at build time. 6674 */ 6675 if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) { 6676 sort(start, count, sizeof(*start), 6677 ftrace_cmp_ips, NULL); 6678 } else { 6679 test_is_sorted(start, count); 6680 } 6681 6682 start_pg = ftrace_allocate_pages(count); 6683 if (!start_pg) 6684 return -ENOMEM; 6685 6686 mutex_lock(&ftrace_lock); 6687 6688 /* 6689 * Core and each module needs their own pages, as 6690 * modules will free them when they are removed. 6691 * Force a new page to be allocated for modules. 6692 */ 6693 if (!mod) { 6694 WARN_ON(ftrace_pages || ftrace_pages_start); 6695 /* First initialization */ 6696 ftrace_pages = ftrace_pages_start = start_pg; 6697 } else { 6698 if (!ftrace_pages) 6699 goto out; 6700 6701 if (WARN_ON(ftrace_pages->next)) { 6702 /* Hmm, we have free pages? 
*/ 6703 while (ftrace_pages->next) 6704 ftrace_pages = ftrace_pages->next; 6705 } 6706 6707 ftrace_pages->next = start_pg; 6708 } 6709 6710 p = start; 6711 pg = start_pg; 6712 while (p < end) { 6713 unsigned long end_offset; 6714 addr = ftrace_call_adjust(*p++); 6715 /* 6716 * Some architecture linkers will pad between 6717 * the different mcount_loc sections of different 6718 * object files to satisfy alignments. 6719 * Skip any NULL pointers. 6720 */ 6721 if (!addr) 6722 continue; 6723 6724 end_offset = (pg->index+1) * sizeof(pg->records[0]); 6725 if (end_offset > PAGE_SIZE << pg->order) { 6726 /* We should have allocated enough */ 6727 if (WARN_ON(!pg->next)) 6728 break; 6729 pg = pg->next; 6730 } 6731 6732 rec = &pg->records[pg->index++]; 6733 rec->ip = addr; 6734 } 6735 6736 /* We should have used all pages */ 6737 WARN_ON(pg->next); 6738 6739 /* Assign the last page to ftrace_pages */ 6740 ftrace_pages = pg; 6741 6742 /* 6743 * We only need to disable interrupts on start up 6744 * because we are modifying code that an interrupt 6745 * may execute, and the modification is not atomic. 6746 * But for modules, nothing runs the code we modify 6747 * until we are finished with it, and there's no 6748 * reason to cause large interrupt latencies while we do it. 6749 */ 6750 if (!mod) 6751 local_irq_save(flags); 6752 ftrace_update_code(mod, start_pg); 6753 if (!mod) 6754 local_irq_restore(flags); 6755 ret = 0; 6756 out: 6757 mutex_unlock(&ftrace_lock); 6758 6759 return ret; 6760 } 6761 6762 struct ftrace_mod_func { 6763 struct list_head list; 6764 char *name; 6765 unsigned long ip; 6766 unsigned int size; 6767 }; 6768 6769 struct ftrace_mod_map { 6770 struct rcu_head rcu; 6771 struct list_head list; 6772 struct module *mod; 6773 unsigned long start_addr; 6774 unsigned long end_addr; 6775 struct list_head funcs; 6776 unsigned int num_funcs; 6777 }; 6778 6779 static int ftrace_get_trampoline_kallsym(unsigned int symnum, 6780 unsigned long *value, char *type, 6781 char *name, char *module_name, 6782 int *exported) 6783 { 6784 struct ftrace_ops *op; 6785 6786 list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) { 6787 if (!op->trampoline || symnum--) 6788 continue; 6789 *value = op->trampoline; 6790 *type = 't'; 6791 strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN); 6792 strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN); 6793 *exported = 0; 6794 return 0; 6795 } 6796 6797 return -ERANGE; 6798 } 6799 6800 #if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES) 6801 /* 6802 * Check if the current ops references the given ip. 6803 * 6804 * If the ops traces all functions, then it was already accounted for. 6805 * If the ops does not trace the current record function, skip it. 6806 * If the ops ignores the function via notrace filter, skip it. 
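* Note that an empty filter hash means "trace everything", so only a
* non-empty filter hash that does not contain the ip excludes it; a
* match in the notrace hash always excludes it.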
6807 */ 6808 static bool 6809 ops_references_ip(struct ftrace_ops *ops, unsigned long ip) 6810 { 6811 /* If ops isn't enabled, ignore it */ 6812 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) 6813 return false; 6814 6815 /* If ops traces all then it includes this function */ 6816 if (ops_traces_mod(ops)) 6817 return true; 6818 6819 /* The function must be in the filter */ 6820 if (!ftrace_hash_empty(ops->func_hash->filter_hash) && 6821 !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip)) 6822 return false; 6823 6824 /* If in notrace hash, we ignore it too */ 6825 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip)) 6826 return false; 6827 6828 return true; 6829 } 6830 #endif 6831 6832 #ifdef CONFIG_MODULES 6833 6834 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) 6835 6836 static LIST_HEAD(ftrace_mod_maps); 6837 6838 static int referenced_filters(struct dyn_ftrace *rec) 6839 { 6840 struct ftrace_ops *ops; 6841 int cnt = 0; 6842 6843 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { 6844 if (ops_references_ip(ops, rec->ip)) { 6845 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT)) 6846 continue; 6847 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY)) 6848 continue; 6849 cnt++; 6850 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) 6851 rec->flags |= FTRACE_FL_REGS; 6852 if (cnt == 1 && ops->trampoline) 6853 rec->flags |= FTRACE_FL_TRAMP; 6854 else 6855 rec->flags &= ~FTRACE_FL_TRAMP; 6856 } 6857 } 6858 6859 return cnt; 6860 } 6861 6862 static void 6863 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash) 6864 { 6865 struct ftrace_func_entry *entry; 6866 struct dyn_ftrace *rec; 6867 int i; 6868 6869 if (ftrace_hash_empty(hash)) 6870 return; 6871 6872 for (i = 0; i < pg->index; i++) { 6873 rec = &pg->records[i]; 6874 entry = __ftrace_lookup_ip(hash, rec->ip); 6875 /* 6876 * Do not allow this rec to match again. 6877 * Yeah, it may waste some memory, but will be removed 6878 * if/when the hash is modified again. 
6879 */ 6880 if (entry) 6881 entry->ip = 0; 6882 } 6883 } 6884 6885 /* Clear any records from hashes */ 6886 static void clear_mod_from_hashes(struct ftrace_page *pg) 6887 { 6888 struct trace_array *tr; 6889 6890 mutex_lock(&trace_types_lock); 6891 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 6892 if (!tr->ops || !tr->ops->func_hash) 6893 continue; 6894 mutex_lock(&tr->ops->func_hash->regex_lock); 6895 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash); 6896 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash); 6897 mutex_unlock(&tr->ops->func_hash->regex_lock); 6898 } 6899 mutex_unlock(&trace_types_lock); 6900 } 6901 6902 static void ftrace_free_mod_map(struct rcu_head *rcu) 6903 { 6904 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu); 6905 struct ftrace_mod_func *mod_func; 6906 struct ftrace_mod_func *n; 6907 6908 /* All the contents of mod_map are now not visible to readers */ 6909 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) { 6910 kfree(mod_func->name); 6911 list_del(&mod_func->list); 6912 kfree(mod_func); 6913 } 6914 6915 kfree(mod_map); 6916 } 6917 6918 void ftrace_release_mod(struct module *mod) 6919 { 6920 struct ftrace_mod_map *mod_map; 6921 struct ftrace_mod_map *n; 6922 struct dyn_ftrace *rec; 6923 struct ftrace_page **last_pg; 6924 struct ftrace_page *tmp_page = NULL; 6925 struct ftrace_page *pg; 6926 6927 mutex_lock(&ftrace_lock); 6928 6929 if (ftrace_disabled) 6930 goto out_unlock; 6931 6932 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { 6933 if (mod_map->mod == mod) { 6934 list_del_rcu(&mod_map->list); 6935 call_rcu(&mod_map->rcu, ftrace_free_mod_map); 6936 break; 6937 } 6938 } 6939 6940 /* 6941 * Each module has its own ftrace_pages, remove 6942 * them from the list. 6943 */ 6944 last_pg = &ftrace_pages_start; 6945 for (pg = ftrace_pages_start; pg; pg = *last_pg) { 6946 rec = &pg->records[0]; 6947 if (within_module_core(rec->ip, mod) || 6948 within_module_init(rec->ip, mod)) { 6949 /* 6950 * As core pages are first, the first 6951 * page should never be a module page. 6952 */ 6953 if (WARN_ON(pg == ftrace_pages_start)) 6954 goto out_unlock; 6955 6956 /* Check if we are deleting the last page */ 6957 if (pg == ftrace_pages) 6958 ftrace_pages = next_to_ftrace_page(last_pg); 6959 6960 ftrace_update_tot_cnt -= pg->index; 6961 *last_pg = pg->next; 6962 6963 pg->next = tmp_page; 6964 tmp_page = pg; 6965 } else 6966 last_pg = &pg->next; 6967 } 6968 out_unlock: 6969 mutex_unlock(&ftrace_lock); 6970 6971 for (pg = tmp_page; pg; pg = tmp_page) { 6972 6973 /* Needs to be called outside of ftrace_lock */ 6974 clear_mod_from_hashes(pg); 6975 6976 if (pg->records) { 6977 free_pages((unsigned long)pg->records, pg->order); 6978 ftrace_number_of_pages -= 1 << pg->order; 6979 } 6980 tmp_page = pg->next; 6981 kfree(pg); 6982 ftrace_number_of_groups--; 6983 } 6984 } 6985 6986 void ftrace_module_enable(struct module *mod) 6987 { 6988 struct dyn_ftrace *rec; 6989 struct ftrace_page *pg; 6990 6991 mutex_lock(&ftrace_lock); 6992 6993 if (ftrace_disabled) 6994 goto out_unlock; 6995 6996 /* 6997 * If the tracing is enabled, go ahead and enable the record. 6998 * 6999 * The reason not to enable the record immediately is the 7000 * inherent check of ftrace_make_nop/ftrace_make_call for 7001 * correct previous instructions. Making first the NOP 7002 * conversion puts the module to the correct state, thus 7003 * passing the ftrace_make_call check. 
7004 * 7005 * We also delay this to after the module code already set the 7006 * text to read-only, as we now need to set it back to read-write 7007 * so that we can modify the text. 7008 */ 7009 if (ftrace_start_up) 7010 ftrace_arch_code_modify_prepare(); 7011 7012 do_for_each_ftrace_rec(pg, rec) { 7013 int cnt; 7014 /* 7015 * do_for_each_ftrace_rec() is a double loop. 7016 * module text shares the pg. If a record is 7017 * not part of this module, then skip this pg, 7018 * which the "break" will do. 7019 */ 7020 if (!within_module_core(rec->ip, mod) && 7021 !within_module_init(rec->ip, mod)) 7022 break; 7023 7024 /* Weak functions should still be ignored */ 7025 if (!test_for_valid_rec(rec)) { 7026 /* Clear all other flags. Should not be enabled anyway */ 7027 rec->flags = FTRACE_FL_DISABLED; 7028 continue; 7029 } 7030 7031 cnt = 0; 7032 7033 /* 7034 * When adding a module, we need to check if tracers are 7035 * currently enabled and if they are, and can trace this record, 7036 * we need to enable the module functions as well as update the 7037 * reference counts for those function records. 7038 */ 7039 if (ftrace_start_up) 7040 cnt += referenced_filters(rec); 7041 7042 rec->flags &= ~FTRACE_FL_DISABLED; 7043 rec->flags += cnt; 7044 7045 if (ftrace_start_up && cnt) { 7046 int failed = __ftrace_replace_code(rec, 1); 7047 if (failed) { 7048 ftrace_bug(failed, rec); 7049 goto out_loop; 7050 } 7051 } 7052 7053 } while_for_each_ftrace_rec(); 7054 7055 out_loop: 7056 if (ftrace_start_up) 7057 ftrace_arch_code_modify_post_process(); 7058 7059 out_unlock: 7060 mutex_unlock(&ftrace_lock); 7061 7062 process_cached_mods(mod->name); 7063 } 7064 7065 void ftrace_module_init(struct module *mod) 7066 { 7067 int ret; 7068 7069 if (ftrace_disabled || !mod->num_ftrace_callsites) 7070 return; 7071 7072 ret = ftrace_process_locs(mod, mod->ftrace_callsites, 7073 mod->ftrace_callsites + mod->num_ftrace_callsites); 7074 if (ret) 7075 pr_warn("ftrace: failed to allocate entries for module '%s' functions\n", 7076 mod->name); 7077 } 7078 7079 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, 7080 struct dyn_ftrace *rec) 7081 { 7082 struct ftrace_mod_func *mod_func; 7083 unsigned long symsize; 7084 unsigned long offset; 7085 char str[KSYM_SYMBOL_LEN]; 7086 char *modname; 7087 const char *ret; 7088 7089 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str); 7090 if (!ret) 7091 return; 7092 7093 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL); 7094 if (!mod_func) 7095 return; 7096 7097 mod_func->name = kstrdup(str, GFP_KERNEL); 7098 if (!mod_func->name) { 7099 kfree(mod_func); 7100 return; 7101 } 7102 7103 mod_func->ip = rec->ip - offset; 7104 mod_func->size = symsize; 7105 7106 mod_map->num_funcs++; 7107 7108 list_add_rcu(&mod_func->list, &mod_map->funcs); 7109 } 7110 7111 static struct ftrace_mod_map * 7112 allocate_ftrace_mod_map(struct module *mod, 7113 unsigned long start, unsigned long end) 7114 { 7115 struct ftrace_mod_map *mod_map; 7116 7117 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL); 7118 if (!mod_map) 7119 return NULL; 7120 7121 mod_map->mod = mod; 7122 mod_map->start_addr = start; 7123 mod_map->end_addr = end; 7124 mod_map->num_funcs = 0; 7125 7126 INIT_LIST_HEAD_RCU(&mod_map->funcs); 7127 7128 list_add_rcu(&mod_map->list, &ftrace_mod_maps); 7129 7130 return mod_map; 7131 } 7132 7133 static const char * 7134 ftrace_func_address_lookup(struct ftrace_mod_map *mod_map, 7135 unsigned long addr, unsigned long *size, 7136 unsigned long *off, char *sym) 7137 { 7138 struct 
ftrace_mod_func *found_func = NULL; 7139 struct ftrace_mod_func *mod_func; 7140 7141 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { 7142 if (addr >= mod_func->ip && 7143 addr < mod_func->ip + mod_func->size) { 7144 found_func = mod_func; 7145 break; 7146 } 7147 } 7148 7149 if (found_func) { 7150 if (size) 7151 *size = found_func->size; 7152 if (off) 7153 *off = addr - found_func->ip; 7154 if (sym) 7155 strlcpy(sym, found_func->name, KSYM_NAME_LEN); 7156 7157 return found_func->name; 7158 } 7159 7160 return NULL; 7161 } 7162 7163 const char * 7164 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, 7165 unsigned long *off, char **modname, char *sym) 7166 { 7167 struct ftrace_mod_map *mod_map; 7168 const char *ret = NULL; 7169 7170 /* mod_map is freed via call_rcu() */ 7171 preempt_disable(); 7172 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { 7173 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); 7174 if (ret) { 7175 if (modname) 7176 *modname = mod_map->mod->name; 7177 break; 7178 } 7179 } 7180 preempt_enable(); 7181 7182 return ret; 7183 } 7184 7185 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, 7186 char *type, char *name, 7187 char *module_name, int *exported) 7188 { 7189 struct ftrace_mod_map *mod_map; 7190 struct ftrace_mod_func *mod_func; 7191 int ret; 7192 7193 preempt_disable(); 7194 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { 7195 7196 if (symnum >= mod_map->num_funcs) { 7197 symnum -= mod_map->num_funcs; 7198 continue; 7199 } 7200 7201 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { 7202 if (symnum > 1) { 7203 symnum--; 7204 continue; 7205 } 7206 7207 *value = mod_func->ip; 7208 *type = 'T'; 7209 strlcpy(name, mod_func->name, KSYM_NAME_LEN); 7210 strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN); 7211 *exported = 1; 7212 preempt_enable(); 7213 return 0; 7214 } 7215 WARN_ON(1); 7216 break; 7217 } 7218 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, 7219 module_name, exported); 7220 preempt_enable(); 7221 return ret; 7222 } 7223 7224 #else 7225 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, 7226 struct dyn_ftrace *rec) { } 7227 static inline struct ftrace_mod_map * 7228 allocate_ftrace_mod_map(struct module *mod, 7229 unsigned long start, unsigned long end) 7230 { 7231 return NULL; 7232 } 7233 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, 7234 char *type, char *name, char *module_name, 7235 int *exported) 7236 { 7237 int ret; 7238 7239 preempt_disable(); 7240 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, 7241 module_name, exported); 7242 preempt_enable(); 7243 return ret; 7244 } 7245 #endif /* CONFIG_MODULES */ 7246 7247 struct ftrace_init_func { 7248 struct list_head list; 7249 unsigned long ip; 7250 }; 7251 7252 /* Clear any init ips from hashes */ 7253 static void 7254 clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash) 7255 { 7256 struct ftrace_func_entry *entry; 7257 7258 entry = ftrace_lookup_ip(hash, func->ip); 7259 /* 7260 * Do not allow this rec to match again. 7261 * Yeah, it may waste some memory, but will be removed 7262 * if/when the hash is modified again. 
7263 */ 7264 if (entry) 7265 entry->ip = 0; 7266 } 7267 7268 static void 7269 clear_func_from_hashes(struct ftrace_init_func *func) 7270 { 7271 struct trace_array *tr; 7272 7273 mutex_lock(&trace_types_lock); 7274 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 7275 if (!tr->ops || !tr->ops->func_hash) 7276 continue; 7277 mutex_lock(&tr->ops->func_hash->regex_lock); 7278 clear_func_from_hash(func, tr->ops->func_hash->filter_hash); 7279 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash); 7280 mutex_unlock(&tr->ops->func_hash->regex_lock); 7281 } 7282 mutex_unlock(&trace_types_lock); 7283 } 7284 7285 static void add_to_clear_hash_list(struct list_head *clear_list, 7286 struct dyn_ftrace *rec) 7287 { 7288 struct ftrace_init_func *func; 7289 7290 func = kmalloc(sizeof(*func), GFP_KERNEL); 7291 if (!func) { 7292 MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n"); 7293 return; 7294 } 7295 7296 func->ip = rec->ip; 7297 list_add(&func->list, clear_list); 7298 } 7299 7300 void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) 7301 { 7302 unsigned long start = (unsigned long)(start_ptr); 7303 unsigned long end = (unsigned long)(end_ptr); 7304 struct ftrace_page **last_pg = &ftrace_pages_start; 7305 struct ftrace_page *pg; 7306 struct dyn_ftrace *rec; 7307 struct dyn_ftrace key; 7308 struct ftrace_mod_map *mod_map = NULL; 7309 struct ftrace_init_func *func, *func_next; 7310 struct list_head clear_hash; 7311 7312 INIT_LIST_HEAD(&clear_hash); 7313 7314 key.ip = start; 7315 key.flags = end; /* overload flags, as it is unsigned long */ 7316 7317 mutex_lock(&ftrace_lock); 7318 7319 /* 7320 * If we are freeing module init memory, then check if 7321 * any tracer is active. If so, we need to save a mapping of 7322 * the module functions being freed with the address. 
7323 */ 7324 if (mod && ftrace_ops_list != &ftrace_list_end) 7325 mod_map = allocate_ftrace_mod_map(mod, start, end); 7326 7327 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { 7328 if (end < pg->records[0].ip || 7329 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) 7330 continue; 7331 again: 7332 rec = bsearch(&key, pg->records, pg->index, 7333 sizeof(struct dyn_ftrace), 7334 ftrace_cmp_recs); 7335 if (!rec) 7336 continue; 7337 7338 /* rec will be cleared from hashes after ftrace_lock unlock */ 7339 add_to_clear_hash_list(&clear_hash, rec); 7340 7341 if (mod_map) 7342 save_ftrace_mod_rec(mod_map, rec); 7343 7344 pg->index--; 7345 ftrace_update_tot_cnt--; 7346 if (!pg->index) { 7347 *last_pg = pg->next; 7348 if (pg->records) { 7349 free_pages((unsigned long)pg->records, pg->order); 7350 ftrace_number_of_pages -= 1 << pg->order; 7351 } 7352 ftrace_number_of_groups--; 7353 kfree(pg); 7354 pg = container_of(last_pg, struct ftrace_page, next); 7355 if (!(*last_pg)) 7356 ftrace_pages = pg; 7357 continue; 7358 } 7359 memmove(rec, rec + 1, 7360 (pg->index - (rec - pg->records)) * sizeof(*rec)); 7361 /* More than one function may be in this block */ 7362 goto again; 7363 } 7364 mutex_unlock(&ftrace_lock); 7365 7366 list_for_each_entry_safe(func, func_next, &clear_hash, list) { 7367 clear_func_from_hashes(func); 7368 kfree(func); 7369 } 7370 } 7371 7372 void __init ftrace_free_init_mem(void) 7373 { 7374 void *start = (void *)(&__init_begin); 7375 void *end = (void *)(&__init_end); 7376 7377 ftrace_boot_snapshot(); 7378 7379 ftrace_free_mem(NULL, start, end); 7380 } 7381 7382 int __init __weak ftrace_dyn_arch_init(void) 7383 { 7384 return 0; 7385 } 7386 7387 void __init ftrace_init(void) 7388 { 7389 extern unsigned long __start_mcount_loc[]; 7390 extern unsigned long __stop_mcount_loc[]; 7391 unsigned long count, flags; 7392 int ret; 7393 7394 local_irq_save(flags); 7395 ret = ftrace_dyn_arch_init(); 7396 local_irq_restore(flags); 7397 if (ret) 7398 goto failed; 7399 7400 count = __stop_mcount_loc - __start_mcount_loc; 7401 if (!count) { 7402 pr_info("ftrace: No functions to be traced?\n"); 7403 goto failed; 7404 } 7405 7406 pr_info("ftrace: allocating %ld entries in %ld pages\n", 7407 count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); 7408 7409 ret = ftrace_process_locs(NULL, 7410 __start_mcount_loc, 7411 __stop_mcount_loc); 7412 if (ret) { 7413 pr_warn("ftrace: failed to allocate entries for functions\n"); 7414 goto failed; 7415 } 7416 7417 pr_info("ftrace: allocated %ld pages with %ld groups\n", 7418 ftrace_number_of_pages, ftrace_number_of_groups); 7419 7420 last_ftrace_enabled = ftrace_enabled = 1; 7421 7422 set_ftrace_early_filters(); 7423 7424 return; 7425 failed: 7426 ftrace_disabled = 1; 7427 } 7428 7429 /* Do nothing if arch does not support this */ 7430 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) 7431 { 7432 } 7433 7434 static void ftrace_update_trampoline(struct ftrace_ops *ops) 7435 { 7436 unsigned long trampoline = ops->trampoline; 7437 7438 arch_ftrace_update_trampoline(ops); 7439 if (ops->trampoline && ops->trampoline != trampoline && 7440 (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) { 7441 /* Add to kallsyms before the perf events */ 7442 ftrace_add_trampoline_to_kallsyms(ops); 7443 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, 7444 ops->trampoline, ops->trampoline_size, false, 7445 FTRACE_TRAMPOLINE_SYM); 7446 /* 7447 * Record the perf text poke event after the ksymbol register 7448 * event. 
7449 */ 7450 perf_event_text_poke((void *)ops->trampoline, NULL, 0, 7451 (void *)ops->trampoline, 7452 ops->trampoline_size); 7453 } 7454 } 7455 7456 void ftrace_init_trace_array(struct trace_array *tr) 7457 { 7458 INIT_LIST_HEAD(&tr->func_probes); 7459 INIT_LIST_HEAD(&tr->mod_trace); 7460 INIT_LIST_HEAD(&tr->mod_notrace); 7461 } 7462 #else 7463 7464 struct ftrace_ops global_ops = { 7465 .func = ftrace_stub, 7466 .flags = FTRACE_OPS_FL_INITIALIZED | 7467 FTRACE_OPS_FL_PID, 7468 }; 7469 7470 static int __init ftrace_nodyn_init(void) 7471 { 7472 ftrace_enabled = 1; 7473 return 0; 7474 } 7475 core_initcall(ftrace_nodyn_init); 7476 7477 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; } 7478 static inline void ftrace_startup_all(int command) { } 7479 7480 static void ftrace_update_trampoline(struct ftrace_ops *ops) 7481 { 7482 } 7483 7484 #endif /* CONFIG_DYNAMIC_FTRACE */ 7485 7486 __init void ftrace_init_global_array_ops(struct trace_array *tr) 7487 { 7488 tr->ops = &global_ops; 7489 tr->ops->private = tr; 7490 ftrace_init_trace_array(tr); 7491 } 7492 7493 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) 7494 { 7495 /* If we filter on pids, update to use the pid function */ 7496 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { 7497 if (WARN_ON(tr->ops->func != ftrace_stub)) 7498 printk("ftrace ops had %pS for function\n", 7499 tr->ops->func); 7500 } 7501 tr->ops->func = func; 7502 tr->ops->private = tr; 7503 } 7504 7505 void ftrace_reset_array_ops(struct trace_array *tr) 7506 { 7507 tr->ops->func = ftrace_stub; 7508 } 7509 7510 static nokprobe_inline void 7511 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 7512 struct ftrace_ops *ignored, struct ftrace_regs *fregs) 7513 { 7514 struct pt_regs *regs = ftrace_get_regs(fregs); 7515 struct ftrace_ops *op; 7516 int bit; 7517 7518 /* 7519 * The ftrace_test_and_set_recursion() will disable preemption, 7520 * which is required since some of the ops may be dynamically 7521 * allocated, they must be freed after a synchronize_rcu(). 7522 */ 7523 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START); 7524 if (bit < 0) 7525 return; 7526 7527 do_for_each_ftrace_op(op, ftrace_ops_list) { 7528 /* Stub functions don't need to be called nor tested */ 7529 if (op->flags & FTRACE_OPS_FL_STUB) 7530 continue; 7531 /* 7532 * Check the following for each ops before calling their func: 7533 * if RCU flag is set, then rcu_is_watching() must be true 7534 * Otherwise test if the ip matches the ops filter 7535 * 7536 * If any of the above fails then the op->func() is not executed. 7537 */ 7538 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && 7539 ftrace_ops_test(op, ip, regs)) { 7540 if (FTRACE_WARN_ON(!op->func)) { 7541 pr_warn("op=%p %pS\n", op, op); 7542 goto out; 7543 } 7544 op->func(ip, parent_ip, op, fregs); 7545 } 7546 } while_for_each_ftrace_op(op); 7547 out: 7548 trace_clear_recursion(bit); 7549 } 7550 7551 /* 7552 * Some archs only support passing ip and parent_ip. Even though 7553 * the list function ignores the op parameter, we do not want any 7554 * C side effects, where a function is called without the caller 7555 * sending a third parameter. 7556 * Archs are to support both the regs and ftrace_ops at the same time. 7557 * If they support ftrace_ops, it is assumed they support regs. 7558 * If call backs want to use regs, they must either check for regs 7559 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. 
7560 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. 7561 * An architecture can pass partial regs with ftrace_ops and still 7562 * set the ARCH_SUPPORTS_FTRACE_OPS. 7563 * 7564 * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be 7565 * arch_ftrace_ops_list_func. 7566 */ 7567 #if ARCH_SUPPORTS_FTRACE_OPS 7568 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 7569 struct ftrace_ops *op, struct ftrace_regs *fregs) 7570 { 7571 __ftrace_ops_list_func(ip, parent_ip, NULL, fregs); 7572 } 7573 #else 7574 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) 7575 { 7576 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); 7577 } 7578 #endif 7579 NOKPROBE_SYMBOL(arch_ftrace_ops_list_func); 7580 7581 /* 7582 * If there's only one function registered but it does not support 7583 * recursion, needs RCU protection, then this function will be called 7584 * by the mcount trampoline. 7585 */ 7586 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, 7587 struct ftrace_ops *op, struct ftrace_regs *fregs) 7588 { 7589 int bit; 7590 7591 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START); 7592 if (bit < 0) 7593 return; 7594 7595 if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) 7596 op->func(ip, parent_ip, op, fregs); 7597 7598 trace_clear_recursion(bit); 7599 } 7600 NOKPROBE_SYMBOL(ftrace_ops_assist_func); 7601 7602 /** 7603 * ftrace_ops_get_func - get the function a trampoline should call 7604 * @ops: the ops to get the function for 7605 * 7606 * Normally the mcount trampoline will call the ops->func, but there 7607 * are times that it should not. For example, if the ops does not 7608 * have its own recursion protection, then it should call the 7609 * ftrace_ops_assist_func() instead. 7610 * 7611 * Returns the function that the trampoline should call for @ops. 7612 */ 7613 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) 7614 { 7615 /* 7616 * If the function does not handle recursion or needs to be RCU safe, 7617 * then we need to call the assist handler. 
7618 */ 7619 if (ops->flags & (FTRACE_OPS_FL_RECURSION | 7620 FTRACE_OPS_FL_RCU)) 7621 return ftrace_ops_assist_func; 7622 7623 return ops->func; 7624 } 7625 7626 static void 7627 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt, 7628 struct task_struct *prev, 7629 struct task_struct *next, 7630 unsigned int prev_state) 7631 { 7632 struct trace_array *tr = data; 7633 struct trace_pid_list *pid_list; 7634 struct trace_pid_list *no_pid_list; 7635 7636 pid_list = rcu_dereference_sched(tr->function_pids); 7637 no_pid_list = rcu_dereference_sched(tr->function_no_pids); 7638 7639 if (trace_ignore_this_task(pid_list, no_pid_list, next)) 7640 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, 7641 FTRACE_PID_IGNORE); 7642 else 7643 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, 7644 next->pid); 7645 } 7646 7647 static void 7648 ftrace_pid_follow_sched_process_fork(void *data, 7649 struct task_struct *self, 7650 struct task_struct *task) 7651 { 7652 struct trace_pid_list *pid_list; 7653 struct trace_array *tr = data; 7654 7655 pid_list = rcu_dereference_sched(tr->function_pids); 7656 trace_filter_add_remove_task(pid_list, self, task); 7657 7658 pid_list = rcu_dereference_sched(tr->function_no_pids); 7659 trace_filter_add_remove_task(pid_list, self, task); 7660 } 7661 7662 static void 7663 ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task) 7664 { 7665 struct trace_pid_list *pid_list; 7666 struct trace_array *tr = data; 7667 7668 pid_list = rcu_dereference_sched(tr->function_pids); 7669 trace_filter_add_remove_task(pid_list, NULL, task); 7670 7671 pid_list = rcu_dereference_sched(tr->function_no_pids); 7672 trace_filter_add_remove_task(pid_list, NULL, task); 7673 } 7674 7675 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) 7676 { 7677 if (enable) { 7678 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, 7679 tr); 7680 register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit, 7681 tr); 7682 } else { 7683 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, 7684 tr); 7685 unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit, 7686 tr); 7687 } 7688 } 7689 7690 static void clear_ftrace_pids(struct trace_array *tr, int type) 7691 { 7692 struct trace_pid_list *pid_list; 7693 struct trace_pid_list *no_pid_list; 7694 int cpu; 7695 7696 pid_list = rcu_dereference_protected(tr->function_pids, 7697 lockdep_is_held(&ftrace_lock)); 7698 no_pid_list = rcu_dereference_protected(tr->function_no_pids, 7699 lockdep_is_held(&ftrace_lock)); 7700 7701 /* Make sure there's something to do */ 7702 if (!pid_type_enabled(type, pid_list, no_pid_list)) 7703 return; 7704 7705 /* See if the pids still need to be checked after this */ 7706 if (!still_need_pid_events(type, pid_list, no_pid_list)) { 7707 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); 7708 for_each_possible_cpu(cpu) 7709 per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE; 7710 } 7711 7712 if (type & TRACE_PIDS) 7713 rcu_assign_pointer(tr->function_pids, NULL); 7714 7715 if (type & TRACE_NO_PIDS) 7716 rcu_assign_pointer(tr->function_no_pids, NULL); 7717 7718 /* Wait till all users are no longer using pid filtering */ 7719 synchronize_rcu(); 7720 7721 if ((type & TRACE_PIDS) && pid_list) 7722 trace_pid_list_free(pid_list); 7723 7724 if ((type & TRACE_NO_PIDS) && no_pid_list) 7725 trace_pid_list_free(no_pid_list); 7726 } 7727 7728 void ftrace_clear_pids(struct trace_array 
*tr) 7729 { 7730 mutex_lock(&ftrace_lock); 7731 7732 clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS); 7733 7734 mutex_unlock(&ftrace_lock); 7735 } 7736 7737 static void ftrace_pid_reset(struct trace_array *tr, int type) 7738 { 7739 mutex_lock(&ftrace_lock); 7740 clear_ftrace_pids(tr, type); 7741 7742 ftrace_update_pid_func(); 7743 ftrace_startup_all(0); 7744 7745 mutex_unlock(&ftrace_lock); 7746 } 7747 7748 /* Greater than any max PID */ 7749 #define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1) 7750 7751 static void *fpid_start(struct seq_file *m, loff_t *pos) 7752 __acquires(RCU) 7753 { 7754 struct trace_pid_list *pid_list; 7755 struct trace_array *tr = m->private; 7756 7757 mutex_lock(&ftrace_lock); 7758 rcu_read_lock_sched(); 7759 7760 pid_list = rcu_dereference_sched(tr->function_pids); 7761 7762 if (!pid_list) 7763 return !(*pos) ? FTRACE_NO_PIDS : NULL; 7764 7765 return trace_pid_start(pid_list, pos); 7766 } 7767 7768 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) 7769 { 7770 struct trace_array *tr = m->private; 7771 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids); 7772 7773 if (v == FTRACE_NO_PIDS) { 7774 (*pos)++; 7775 return NULL; 7776 } 7777 return trace_pid_next(pid_list, v, pos); 7778 } 7779 7780 static void fpid_stop(struct seq_file *m, void *p) 7781 __releases(RCU) 7782 { 7783 rcu_read_unlock_sched(); 7784 mutex_unlock(&ftrace_lock); 7785 } 7786 7787 static int fpid_show(struct seq_file *m, void *v) 7788 { 7789 if (v == FTRACE_NO_PIDS) { 7790 seq_puts(m, "no pid\n"); 7791 return 0; 7792 } 7793 7794 return trace_pid_show(m, v); 7795 } 7796 7797 static const struct seq_operations ftrace_pid_sops = { 7798 .start = fpid_start, 7799 .next = fpid_next, 7800 .stop = fpid_stop, 7801 .show = fpid_show, 7802 }; 7803 7804 static void *fnpid_start(struct seq_file *m, loff_t *pos) 7805 __acquires(RCU) 7806 { 7807 struct trace_pid_list *pid_list; 7808 struct trace_array *tr = m->private; 7809 7810 mutex_lock(&ftrace_lock); 7811 rcu_read_lock_sched(); 7812 7813 pid_list = rcu_dereference_sched(tr->function_no_pids); 7814 7815 if (!pid_list) 7816 return !(*pos) ? 
FTRACE_NO_PIDS : NULL; 7817 7818 return trace_pid_start(pid_list, pos); 7819 } 7820 7821 static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos) 7822 { 7823 struct trace_array *tr = m->private; 7824 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids); 7825 7826 if (v == FTRACE_NO_PIDS) { 7827 (*pos)++; 7828 return NULL; 7829 } 7830 return trace_pid_next(pid_list, v, pos); 7831 } 7832 7833 static const struct seq_operations ftrace_no_pid_sops = { 7834 .start = fnpid_start, 7835 .next = fnpid_next, 7836 .stop = fpid_stop, 7837 .show = fpid_show, 7838 }; 7839 7840 static int pid_open(struct inode *inode, struct file *file, int type) 7841 { 7842 const struct seq_operations *seq_ops; 7843 struct trace_array *tr = inode->i_private; 7844 struct seq_file *m; 7845 int ret = 0; 7846 7847 ret = tracing_check_open_get_tr(tr); 7848 if (ret) 7849 return ret; 7850 7851 if ((file->f_mode & FMODE_WRITE) && 7852 (file->f_flags & O_TRUNC)) 7853 ftrace_pid_reset(tr, type); 7854 7855 switch (type) { 7856 case TRACE_PIDS: 7857 seq_ops = &ftrace_pid_sops; 7858 break; 7859 case TRACE_NO_PIDS: 7860 seq_ops = &ftrace_no_pid_sops; 7861 break; 7862 default: 7863 trace_array_put(tr); 7864 WARN_ON_ONCE(1); 7865 return -EINVAL; 7866 } 7867 7868 ret = seq_open(file, seq_ops); 7869 if (ret < 0) { 7870 trace_array_put(tr); 7871 } else { 7872 m = file->private_data; 7873 /* copy tr over to seq ops */ 7874 m->private = tr; 7875 } 7876 7877 return ret; 7878 } 7879 7880 static int 7881 ftrace_pid_open(struct inode *inode, struct file *file) 7882 { 7883 return pid_open(inode, file, TRACE_PIDS); 7884 } 7885 7886 static int 7887 ftrace_no_pid_open(struct inode *inode, struct file *file) 7888 { 7889 return pid_open(inode, file, TRACE_NO_PIDS); 7890 } 7891 7892 static void ignore_task_cpu(void *data) 7893 { 7894 struct trace_array *tr = data; 7895 struct trace_pid_list *pid_list; 7896 struct trace_pid_list *no_pid_list; 7897 7898 /* 7899 * This function is called by on_each_cpu() while the 7900 * ftrace_lock mutex is held.
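* Holding it keeps the pid lists stable while each CPU updates its
* own ftrace_ignore_pid for whatever task is currently running there.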
7901 */ 7902 pid_list = rcu_dereference_protected(tr->function_pids, 7903 mutex_is_locked(&ftrace_lock)); 7904 no_pid_list = rcu_dereference_protected(tr->function_no_pids, 7905 mutex_is_locked(&ftrace_lock)); 7906 7907 if (trace_ignore_this_task(pid_list, no_pid_list, current)) 7908 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, 7909 FTRACE_PID_IGNORE); 7910 else 7911 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, 7912 current->pid); 7913 } 7914 7915 static ssize_t 7916 pid_write(struct file *filp, const char __user *ubuf, 7917 size_t cnt, loff_t *ppos, int type) 7918 { 7919 struct seq_file *m = filp->private_data; 7920 struct trace_array *tr = m->private; 7921 struct trace_pid_list *filtered_pids; 7922 struct trace_pid_list *other_pids; 7923 struct trace_pid_list *pid_list; 7924 ssize_t ret; 7925 7926 if (!cnt) 7927 return 0; 7928 7929 mutex_lock(&ftrace_lock); 7930 7931 switch (type) { 7932 case TRACE_PIDS: 7933 filtered_pids = rcu_dereference_protected(tr->function_pids, 7934 lockdep_is_held(&ftrace_lock)); 7935 other_pids = rcu_dereference_protected(tr->function_no_pids, 7936 lockdep_is_held(&ftrace_lock)); 7937 break; 7938 case TRACE_NO_PIDS: 7939 filtered_pids = rcu_dereference_protected(tr->function_no_pids, 7940 lockdep_is_held(&ftrace_lock)); 7941 other_pids = rcu_dereference_protected(tr->function_pids, 7942 lockdep_is_held(&ftrace_lock)); 7943 break; 7944 default: 7945 ret = -EINVAL; 7946 WARN_ON_ONCE(1); 7947 goto out; 7948 } 7949 7950 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt); 7951 if (ret < 0) 7952 goto out; 7953 7954 switch (type) { 7955 case TRACE_PIDS: 7956 rcu_assign_pointer(tr->function_pids, pid_list); 7957 break; 7958 case TRACE_NO_PIDS: 7959 rcu_assign_pointer(tr->function_no_pids, pid_list); 7960 break; 7961 } 7962 7963 7964 if (filtered_pids) { 7965 synchronize_rcu(); 7966 trace_pid_list_free(filtered_pids); 7967 } else if (pid_list && !other_pids) { 7968 /* Register a probe to set whether to ignore the tracing of a task */ 7969 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); 7970 } 7971 7972 /* 7973 * Ignoring of pids is done at task switch. But we have to 7974 * check for those tasks that are currently running. 7975 * Always do this in case a pid was appended or removed. 
7976 */ 7977 on_each_cpu(ignore_task_cpu, tr, 1); 7978 7979 ftrace_update_pid_func(); 7980 ftrace_startup_all(0); 7981 out: 7982 mutex_unlock(&ftrace_lock); 7983 7984 if (ret > 0) 7985 *ppos += ret; 7986 7987 return ret; 7988 } 7989 7990 static ssize_t 7991 ftrace_pid_write(struct file *filp, const char __user *ubuf, 7992 size_t cnt, loff_t *ppos) 7993 { 7994 return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS); 7995 } 7996 7997 static ssize_t 7998 ftrace_no_pid_write(struct file *filp, const char __user *ubuf, 7999 size_t cnt, loff_t *ppos) 8000 { 8001 return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS); 8002 } 8003 8004 static int 8005 ftrace_pid_release(struct inode *inode, struct file *file) 8006 { 8007 struct trace_array *tr = inode->i_private; 8008 8009 trace_array_put(tr); 8010 8011 return seq_release(inode, file); 8012 } 8013 8014 static const struct file_operations ftrace_pid_fops = { 8015 .open = ftrace_pid_open, 8016 .write = ftrace_pid_write, 8017 .read = seq_read, 8018 .llseek = tracing_lseek, 8019 .release = ftrace_pid_release, 8020 }; 8021 8022 static const struct file_operations ftrace_no_pid_fops = { 8023 .open = ftrace_no_pid_open, 8024 .write = ftrace_no_pid_write, 8025 .read = seq_read, 8026 .llseek = tracing_lseek, 8027 .release = ftrace_pid_release, 8028 }; 8029 8030 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer) 8031 { 8032 trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer, 8033 tr, &ftrace_pid_fops); 8034 trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE, 8035 d_tracer, tr, &ftrace_no_pid_fops); 8036 } 8037 8038 void __init ftrace_init_tracefs_toplevel(struct trace_array *tr, 8039 struct dentry *d_tracer) 8040 { 8041 /* Only the top level directory has the dyn_tracefs and profile */ 8042 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); 8043 8044 ftrace_init_dyn_tracefs(d_tracer); 8045 ftrace_profile_tracefs(d_tracer); 8046 } 8047 8048 /** 8049 * ftrace_kill - kill ftrace 8050 * 8051 * This function should be used by panic code. It stops ftrace 8052 * but in a not so nice way. If you need to simply kill ftrace 8053 * from a non-atomic section, use ftrace_kill. 8054 */ 8055 void ftrace_kill(void) 8056 { 8057 ftrace_disabled = 1; 8058 ftrace_enabled = 0; 8059 ftrace_trace_function = ftrace_stub; 8060 } 8061 8062 /** 8063 * ftrace_is_dead - Test if ftrace is dead or not. 8064 * 8065 * Returns 1 if ftrace is "dead", zero otherwise. 8066 */ 8067 int ftrace_is_dead(void) 8068 { 8069 return ftrace_disabled; 8070 } 8071 8072 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 8073 /* 8074 * When registering ftrace_ops with IPMODIFY, it is necessary to make sure 8075 * it doesn't conflict with any direct ftrace_ops. If there is existing 8076 * direct ftrace_ops on a kernel function being patched, call 8077 * FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER on it to enable sharing. 8078 * 8079 * @ops: ftrace_ops being registered. 8080 * 8081 * Returns: 8082 * 0 on success; 8083 * Negative on failure. 
8084 */ 8085 static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops) 8086 { 8087 struct ftrace_func_entry *entry; 8088 struct ftrace_hash *hash; 8089 struct ftrace_ops *op; 8090 int size, i, ret; 8091 8092 lockdep_assert_held_once(&direct_mutex); 8093 8094 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) 8095 return 0; 8096 8097 hash = ops->func_hash->filter_hash; 8098 size = 1 << hash->size_bits; 8099 for (i = 0; i < size; i++) { 8100 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 8101 unsigned long ip = entry->ip; 8102 bool found_op = false; 8103 8104 mutex_lock(&ftrace_lock); 8105 do_for_each_ftrace_op(op, ftrace_ops_list) { 8106 if (!(op->flags & FTRACE_OPS_FL_DIRECT)) 8107 continue; 8108 if (ops_references_ip(op, ip)) { 8109 found_op = true; 8110 break; 8111 } 8112 } while_for_each_ftrace_op(op); 8113 mutex_unlock(&ftrace_lock); 8114 8115 if (found_op) { 8116 if (!op->ops_func) 8117 return -EBUSY; 8118 8119 ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER); 8120 if (ret) 8121 return ret; 8122 } 8123 } 8124 } 8125 8126 return 0; 8127 } 8128 8129 /* 8130 * Similar to prepare_direct_functions_for_ipmodify, clean up after ops 8131 * with IPMODIFY is unregistered. The cleanup is optional for most DIRECT 8132 * ops. 8133 */ 8134 static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops) 8135 { 8136 struct ftrace_func_entry *entry; 8137 struct ftrace_hash *hash; 8138 struct ftrace_ops *op; 8139 int size, i; 8140 8141 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) 8142 return; 8143 8144 mutex_lock(&direct_mutex); 8145 8146 hash = ops->func_hash->filter_hash; 8147 size = 1 << hash->size_bits; 8148 for (i = 0; i < size; i++) { 8149 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 8150 unsigned long ip = entry->ip; 8151 bool found_op = false; 8152 8153 mutex_lock(&ftrace_lock); 8154 do_for_each_ftrace_op(op, ftrace_ops_list) { 8155 if (!(op->flags & FTRACE_OPS_FL_DIRECT)) 8156 continue; 8157 if (ops_references_ip(op, ip)) { 8158 found_op = true; 8159 break; 8160 } 8161 } while_for_each_ftrace_op(op); 8162 mutex_unlock(&ftrace_lock); 8163 8164 /* The cleanup is optional, ignore any errors */ 8165 if (found_op && op->ops_func) 8166 op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER); 8167 } 8168 } 8169 mutex_unlock(&direct_mutex); 8170 } 8171 8172 #define lock_direct_mutex() mutex_lock(&direct_mutex) 8173 #define unlock_direct_mutex() mutex_unlock(&direct_mutex) 8174 8175 #else /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ 8176 8177 static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops) 8178 { 8179 return 0; 8180 } 8181 8182 static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops) 8183 { 8184 } 8185 8186 #define lock_direct_mutex() do { } while (0) 8187 #define unlock_direct_mutex() do { } while (0) 8188 8189 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ 8190 8191 /* 8192 * Similar to register_ftrace_function, except we don't lock direct_mutex. 8193 */ 8194 static int register_ftrace_function_nolock(struct ftrace_ops *ops) 8195 { 8196 int ret; 8197 8198 ftrace_ops_init(ops); 8199 8200 mutex_lock(&ftrace_lock); 8201 8202 ret = ftrace_startup(ops, 0); 8203 8204 mutex_unlock(&ftrace_lock); 8205 8206 return ret; 8207 } 8208 8209 /** 8210 * register_ftrace_function - register a function for profiling 8211 * @ops: ops structure that holds the function for profiling. 8212 * 8213 * Register a function to be called by all functions in the 8214 * kernel. 
8215 * 8216 * Note: @ops->func and all the functions it calls must be labeled 8217 * with "notrace", otherwise it will go into a 8218 * recursive loop. 8219 */ 8220 int register_ftrace_function(struct ftrace_ops *ops) 8221 { 8222 int ret; 8223 8224 lock_direct_mutex(); 8225 ret = prepare_direct_functions_for_ipmodify(ops); 8226 if (ret < 0) 8227 goto out_unlock; 8228 8229 ret = register_ftrace_function_nolock(ops); 8230 8231 out_unlock: 8232 unlock_direct_mutex(); 8233 return ret; 8234 } 8235 EXPORT_SYMBOL_GPL(register_ftrace_function); 8236 8237 /** 8238 * unregister_ftrace_function - unregister a function for profiling. 8239 * @ops: ops structure that holds the function to unregister 8240 * 8241 * Unregister a function that was added to be called by ftrace profiling. 8242 */ 8243 int unregister_ftrace_function(struct ftrace_ops *ops) 8244 { 8245 int ret; 8246 8247 mutex_lock(&ftrace_lock); 8248 ret = ftrace_shutdown(ops, 0); 8249 mutex_unlock(&ftrace_lock); 8250 8251 cleanup_direct_functions_after_ipmodify(ops); 8252 return ret; 8253 } 8254 EXPORT_SYMBOL_GPL(unregister_ftrace_function); 8255 8256 static int symbols_cmp(const void *a, const void *b) 8257 { 8258 const char **str_a = (const char **) a; 8259 const char **str_b = (const char **) b; 8260 8261 return strcmp(*str_a, *str_b); 8262 } 8263 8264 struct kallsyms_data { 8265 unsigned long *addrs; 8266 const char **syms; 8267 size_t cnt; 8268 size_t found; 8269 }; 8270 8271 static int kallsyms_callback(void *data, const char *name, 8272 struct module *mod, unsigned long addr) 8273 { 8274 struct kallsyms_data *args = data; 8275 const char **sym; 8276 int idx; 8277 8278 sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp); 8279 if (!sym) 8280 return 0; 8281 8282 idx = sym - args->syms; 8283 if (args->addrs[idx]) 8284 return 0; 8285 8286 if (!ftrace_location(addr)) 8287 return 0; 8288 8289 args->addrs[idx] = addr; 8290 args->found++; 8291 return args->found == args->cnt ? 1 : 0; 8292 } 8293 8294 /** 8295 * ftrace_lookup_symbols - Lookup addresses for array of symbols 8296 * 8297 * @sorted_syms: array of symbols pointers symbols to resolve, 8298 * must be alphabetically sorted 8299 * @cnt: number of symbols/addresses in @syms/@addrs arrays 8300 * @addrs: array for storing resulting addresses 8301 * 8302 * This function looks up addresses for array of symbols provided in 8303 * @syms array (must be alphabetically sorted) and stores them in 8304 * @addrs array, which needs to be big enough to store at least @cnt 8305 * addresses. 8306 * 8307 * This function returns 0 if all provided symbols are found, 8308 * -ESRCH otherwise. 8309 */ 8310 int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs) 8311 { 8312 struct kallsyms_data args; 8313 int err; 8314 8315 memset(addrs, 0, sizeof(*addrs) * cnt); 8316 args.addrs = addrs; 8317 args.syms = sorted_syms; 8318 args.cnt = cnt; 8319 args.found = 0; 8320 err = kallsyms_on_each_symbol(kallsyms_callback, &args); 8321 if (err < 0) 8322 return err; 8323 return args.found == args.cnt ? 
0 : -ESRCH; 8324 } 8325 8326 #ifdef CONFIG_SYSCTL 8327 8328 #ifdef CONFIG_DYNAMIC_FTRACE 8329 static void ftrace_startup_sysctl(void) 8330 { 8331 int command; 8332 8333 if (unlikely(ftrace_disabled)) 8334 return; 8335 8336 /* Force update next time */ 8337 saved_ftrace_func = NULL; 8338 /* ftrace_start_up is true if we want ftrace running */ 8339 if (ftrace_start_up) { 8340 command = FTRACE_UPDATE_CALLS; 8341 if (ftrace_graph_active) 8342 command |= FTRACE_START_FUNC_RET; 8343 ftrace_startup_enable(command); 8344 } 8345 } 8346 8347 static void ftrace_shutdown_sysctl(void) 8348 { 8349 int command; 8350 8351 if (unlikely(ftrace_disabled)) 8352 return; 8353 8354 /* ftrace_start_up is true if ftrace is running */ 8355 if (ftrace_start_up) { 8356 command = FTRACE_DISABLE_CALLS; 8357 if (ftrace_graph_active) 8358 command |= FTRACE_STOP_FUNC_RET; 8359 ftrace_run_update_code(command); 8360 } 8361 } 8362 #else 8363 # define ftrace_startup_sysctl() do { } while (0) 8364 # define ftrace_shutdown_sysctl() do { } while (0) 8365 #endif /* CONFIG_DYNAMIC_FTRACE */ 8366 8367 static bool is_permanent_ops_registered(void) 8368 { 8369 struct ftrace_ops *op; 8370 8371 do_for_each_ftrace_op(op, ftrace_ops_list) { 8372 if (op->flags & FTRACE_OPS_FL_PERMANENT) 8373 return true; 8374 } while_for_each_ftrace_op(op); 8375 8376 return false; 8377 } 8378 8379 static int 8380 ftrace_enable_sysctl(struct ctl_table *table, int write, 8381 void *buffer, size_t *lenp, loff_t *ppos) 8382 { 8383 int ret = -ENODEV; 8384 8385 mutex_lock(&ftrace_lock); 8386 8387 if (unlikely(ftrace_disabled)) 8388 goto out; 8389 8390 ret = proc_dointvec(table, write, buffer, lenp, ppos); 8391 8392 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) 8393 goto out; 8394 8395 if (ftrace_enabled) { 8396 8397 /* we are starting ftrace again */ 8398 if (rcu_dereference_protected(ftrace_ops_list, 8399 lockdep_is_held(&ftrace_lock)) != &ftrace_list_end) 8400 update_ftrace_function(); 8401 8402 ftrace_startup_sysctl(); 8403 8404 } else { 8405 if (is_permanent_ops_registered()) { 8406 ftrace_enabled = true; 8407 ret = -EBUSY; 8408 goto out; 8409 } 8410 8411 /* stopping ftrace calls (just send to ftrace_stub) */ 8412 ftrace_trace_function = ftrace_stub; 8413 8414 ftrace_shutdown_sysctl(); 8415 } 8416 8417 last_ftrace_enabled = !!ftrace_enabled; 8418 out: 8419 mutex_unlock(&ftrace_lock); 8420 return ret; 8421 } 8422 8423 static struct ctl_table ftrace_sysctls[] = { 8424 { 8425 .procname = "ftrace_enabled", 8426 .data = &ftrace_enabled, 8427 .maxlen = sizeof(int), 8428 .mode = 0644, 8429 .proc_handler = ftrace_enable_sysctl, 8430 }, 8431 {} 8432 }; 8433 8434 static int __init ftrace_sysctl_init(void) 8435 { 8436 register_sysctl_init("kernel", ftrace_sysctls); 8437 return 0; 8438 } 8439 late_initcall(ftrace_sysctl_init); 8440 #endif 8441