1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Infrastructure for profiling code inserted by 'gcc -pg'. 4 * 5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> 6 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com> 7 * 8 * Originally ported from the -rt patch by: 9 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> 10 * 11 * Based on code in the latency_tracer, that is: 12 * 13 * Copyright (C) 2004-2006 Ingo Molnar 14 * Copyright (C) 2004 Nadia Yvette Chambers 15 */ 16 17 #include <linux/stop_machine.h> 18 #include <linux/clocksource.h> 19 #include <linux/sched/task.h> 20 #include <linux/kallsyms.h> 21 #include <linux/seq_file.h> 22 #include <linux/tracefs.h> 23 #include <linux/hardirq.h> 24 #include <linux/kthread.h> 25 #include <linux/uaccess.h> 26 #include <linux/bsearch.h> 27 #include <linux/module.h> 28 #include <linux/ftrace.h> 29 #include <linux/sysctl.h> 30 #include <linux/slab.h> 31 #include <linux/ctype.h> 32 #include <linux/sort.h> 33 #include <linux/list.h> 34 #include <linux/hash.h> 35 #include <linux/rcupdate.h> 36 #include <linux/kprobes.h> 37 38 #include <trace/events/sched.h> 39 40 #include <asm/sections.h> 41 #include <asm/setup.h> 42 43 #include "ftrace_internal.h" 44 #include "trace_output.h" 45 #include "trace_stat.h" 46 47 #define FTRACE_WARN_ON(cond) \ 48 ({ \ 49 int ___r = cond; \ 50 if (WARN_ON(___r)) \ 51 ftrace_kill(); \ 52 ___r; \ 53 }) 54 55 #define FTRACE_WARN_ON_ONCE(cond) \ 56 ({ \ 57 int ___r = cond; \ 58 if (WARN_ON_ONCE(___r)) \ 59 ftrace_kill(); \ 60 ___r; \ 61 }) 62 63 /* hash bits for specific function selection */ 64 #define FTRACE_HASH_BITS 7 65 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) 66 #define FTRACE_HASH_DEFAULT_BITS 10 67 #define FTRACE_HASH_MAX_BITS 12 68 69 #ifdef CONFIG_DYNAMIC_FTRACE 70 #define INIT_OPS_HASH(opsname) \ 71 .func_hash = &opsname.local_hash, \ 72 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), 73 #else 74 #define INIT_OPS_HASH(opsname) 75 #endif 76 77 enum { 78 FTRACE_MODIFY_ENABLE_FL = (1 << 0), 79 FTRACE_MODIFY_MAY_SLEEP_FL = (1 << 1), 80 }; 81 82 struct ftrace_ops ftrace_list_end __read_mostly = { 83 .func = ftrace_stub, 84 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, 85 INIT_OPS_HASH(ftrace_list_end) 86 }; 87 88 /* ftrace_enabled is a method to turn ftrace on or off */ 89 int ftrace_enabled __read_mostly; 90 static int last_ftrace_enabled; 91 92 /* Current function tracing op */ 93 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end; 94 /* What to set function_trace_op to */ 95 static struct ftrace_ops *set_function_trace_op; 96 97 static bool ftrace_pids_enabled(struct ftrace_ops *ops) 98 { 99 struct trace_array *tr; 100 101 if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private) 102 return false; 103 104 tr = ops->private; 105 106 return tr->function_pids != NULL; 107 } 108 109 static void ftrace_update_trampoline(struct ftrace_ops *ops); 110 111 /* 112 * ftrace_disabled is set when an anomaly is discovered. 113 * ftrace_disabled is much stronger than ftrace_enabled. 
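 * Once ftrace_disabled is set (see ftrace_kill(), invoked from the
 * FTRACE_WARN_ON() macros above), function tracing is shut down and the
 * flag is never cleared again for the lifetime of the kernel.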
114 */ 115 static int ftrace_disabled __read_mostly; 116 117 DEFINE_MUTEX(ftrace_lock); 118 119 struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end; 120 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; 121 struct ftrace_ops global_ops; 122 123 #if ARCH_SUPPORTS_FTRACE_OPS 124 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 125 struct ftrace_ops *op, struct pt_regs *regs); 126 #else 127 /* See comment below, where ftrace_ops_list_func is defined */ 128 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); 129 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) 130 #endif 131 132 static inline void ftrace_ops_init(struct ftrace_ops *ops) 133 { 134 #ifdef CONFIG_DYNAMIC_FTRACE 135 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { 136 mutex_init(&ops->local_hash.regex_lock); 137 ops->func_hash = &ops->local_hash; 138 ops->flags |= FTRACE_OPS_FL_INITIALIZED; 139 } 140 #endif 141 } 142 143 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, 144 struct ftrace_ops *op, struct pt_regs *regs) 145 { 146 struct trace_array *tr = op->private; 147 148 if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid)) 149 return; 150 151 op->saved_func(ip, parent_ip, op, regs); 152 } 153 154 static void ftrace_sync(struct work_struct *work) 155 { 156 /* 157 * This function is just a stub to implement a hard force 158 * of synchronize_rcu(). This requires synchronizing 159 * tasks even in userspace and idle. 160 * 161 * Yes, function tracing is rude. 162 */ 163 } 164 165 static void ftrace_sync_ipi(void *data) 166 { 167 /* Probably not needed, but do it anyway */ 168 smp_rmb(); 169 } 170 171 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops) 172 { 173 /* 174 * If this is a dynamic, RCU, or per CPU ops, or we force list func, 175 * then it needs to call the list anyway. 176 */ 177 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) || 178 FTRACE_FORCE_LIST_FUNC) 179 return ftrace_ops_list_func; 180 181 return ftrace_ops_get_func(ops); 182 } 183 184 static void update_ftrace_function(void) 185 { 186 ftrace_func_t func; 187 188 /* 189 * Prepare the ftrace_ops that the arch callback will use. 190 * If there's only one ftrace_ops registered, the ftrace_ops_list 191 * will point to the ops we want. 192 */ 193 set_function_trace_op = rcu_dereference_protected(ftrace_ops_list, 194 lockdep_is_held(&ftrace_lock)); 195 196 /* If there's no ftrace_ops registered, just call the stub function */ 197 if (set_function_trace_op == &ftrace_list_end) { 198 func = ftrace_stub; 199 200 /* 201 * If we are at the end of the list and this ops is 202 * recursion safe and not dynamic and the arch supports passing ops, 203 * then have the mcount trampoline call the function directly. 204 */ 205 } else if (rcu_dereference_protected(ftrace_ops_list->next, 206 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { 207 func = ftrace_ops_get_list_func(ftrace_ops_list); 208 209 } else { 210 /* Just use the default ftrace_ops */ 211 set_function_trace_op = &ftrace_list_end; 212 func = ftrace_ops_list_func; 213 } 214 215 update_function_graph_func(); 216 217 /* If there's no change, then do nothing more here */ 218 if (ftrace_trace_function == func) 219 return; 220 221 /* 222 * If we are using the list function, it doesn't care 223 * about the function_trace_ops. 
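	 * (ftrace_ops_list_func walks ftrace_ops_list itself and passes each
	 * ops as the third argument to its callback, so the single global
	 * function_trace_op is simply never consulted on that path.)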
 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
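	 *
	 * A minimal sketch of an ops that asks for pt_regs only when the
	 * architecture can provide them (illustrative, not from this file):
	 *
	 *	static struct ftrace_ops my_ops = {
	 *		.func	= my_callback,
	 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
	 *	};
	 *
	 * Such a callback should not assume that the regs argument it
	 * receives is always a full, non-NULL pt_regs.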
320 */ 321 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS && 322 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)) 323 return -EINVAL; 324 325 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED) 326 ops->flags |= FTRACE_OPS_FL_SAVE_REGS; 327 #endif 328 329 if (!core_kernel_data((unsigned long)ops)) 330 ops->flags |= FTRACE_OPS_FL_DYNAMIC; 331 332 add_ftrace_ops(&ftrace_ops_list, ops); 333 334 /* Always save the function, and reset at unregistering */ 335 ops->saved_func = ops->func; 336 337 if (ftrace_pids_enabled(ops)) 338 ops->func = ftrace_pid_func; 339 340 ftrace_update_trampoline(ops); 341 342 if (ftrace_enabled) 343 update_ftrace_function(); 344 345 return 0; 346 } 347 348 int __unregister_ftrace_function(struct ftrace_ops *ops) 349 { 350 int ret; 351 352 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) 353 return -EBUSY; 354 355 ret = remove_ftrace_ops(&ftrace_ops_list, ops); 356 357 if (ret < 0) 358 return ret; 359 360 if (ftrace_enabled) 361 update_ftrace_function(); 362 363 ops->func = ops->saved_func; 364 365 return 0; 366 } 367 368 static void ftrace_update_pid_func(void) 369 { 370 struct ftrace_ops *op; 371 372 /* Only do something if we are tracing something */ 373 if (ftrace_trace_function == ftrace_stub) 374 return; 375 376 do_for_each_ftrace_op(op, ftrace_ops_list) { 377 if (op->flags & FTRACE_OPS_FL_PID) { 378 op->func = ftrace_pids_enabled(op) ? 379 ftrace_pid_func : op->saved_func; 380 ftrace_update_trampoline(op); 381 } 382 } while_for_each_ftrace_op(op); 383 384 update_ftrace_function(); 385 } 386 387 #ifdef CONFIG_FUNCTION_PROFILER 388 struct ftrace_profile { 389 struct hlist_node node; 390 unsigned long ip; 391 unsigned long counter; 392 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 393 unsigned long long time; 394 unsigned long long time_squared; 395 #endif 396 }; 397 398 struct ftrace_profile_page { 399 struct ftrace_profile_page *next; 400 unsigned long index; 401 struct ftrace_profile records[]; 402 }; 403 404 struct ftrace_profile_stat { 405 atomic_t disabled; 406 struct hlist_head *hash; 407 struct ftrace_profile_page *pages; 408 struct ftrace_profile_page *start; 409 struct tracer_stat stat; 410 }; 411 412 #define PROFILE_RECORDS_SIZE \ 413 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records)) 414 415 #define PROFILES_PER_PAGE \ 416 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile)) 417 418 static int ftrace_profile_enabled __read_mostly; 419 420 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */ 421 static DEFINE_MUTEX(ftrace_profile_lock); 422 423 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats); 424 425 #define FTRACE_PROFILE_HASH_BITS 10 426 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS) 427 428 static void * 429 function_stat_next(void *v, int idx) 430 { 431 struct ftrace_profile *rec = v; 432 struct ftrace_profile_page *pg; 433 434 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK); 435 436 again: 437 if (idx != 0) 438 rec++; 439 440 if ((void *)rec >= (void *)&pg->records[pg->index]) { 441 pg = pg->next; 442 if (!pg) 443 return NULL; 444 rec = &pg->records[0]; 445 if (!rec->counter) 446 goto again; 447 } 448 449 return rec; 450 } 451 452 static void *function_stat_start(struct tracer_stat *trace) 453 { 454 struct ftrace_profile_stat *stat = 455 container_of(trace, struct ftrace_profile_stat, stat); 456 457 if (!stat || !stat->start) 458 return NULL; 459 460 return function_stat_next(&stat->start->records[0], 0); 461 } 462 463 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 
464 /* function graph compares on total time */ 465 static int function_stat_cmp(void *p1, void *p2) 466 { 467 struct ftrace_profile *a = p1; 468 struct ftrace_profile *b = p2; 469 470 if (a->time < b->time) 471 return -1; 472 if (a->time > b->time) 473 return 1; 474 else 475 return 0; 476 } 477 #else 478 /* not function graph compares against hits */ 479 static int function_stat_cmp(void *p1, void *p2) 480 { 481 struct ftrace_profile *a = p1; 482 struct ftrace_profile *b = p2; 483 484 if (a->counter < b->counter) 485 return -1; 486 if (a->counter > b->counter) 487 return 1; 488 else 489 return 0; 490 } 491 #endif 492 493 static int function_stat_headers(struct seq_file *m) 494 { 495 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 496 seq_puts(m, " Function " 497 "Hit Time Avg s^2\n" 498 " -------- " 499 "--- ---- --- ---\n"); 500 #else 501 seq_puts(m, " Function Hit\n" 502 " -------- ---\n"); 503 #endif 504 return 0; 505 } 506 507 static int function_stat_show(struct seq_file *m, void *v) 508 { 509 struct ftrace_profile *rec = v; 510 char str[KSYM_SYMBOL_LEN]; 511 int ret = 0; 512 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 513 static struct trace_seq s; 514 unsigned long long avg; 515 unsigned long long stddev; 516 #endif 517 mutex_lock(&ftrace_profile_lock); 518 519 /* we raced with function_profile_reset() */ 520 if (unlikely(rec->counter == 0)) { 521 ret = -EBUSY; 522 goto out; 523 } 524 525 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 526 avg = rec->time; 527 do_div(avg, rec->counter); 528 if (tracing_thresh && (avg < tracing_thresh)) 529 goto out; 530 #endif 531 532 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); 533 seq_printf(m, " %-30.30s %10lu", str, rec->counter); 534 535 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 536 seq_puts(m, " "); 537 538 /* Sample standard deviation (s^2) */ 539 if (rec->counter <= 1) 540 stddev = 0; 541 else { 542 /* 543 * Apply Welford's method: 544 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2) 545 */ 546 stddev = rec->counter * rec->time_squared - 547 rec->time * rec->time; 548 549 /* 550 * Divide only 1000 for ns^2 -> us^2 conversion. 551 * trace_print_graph_duration will divide 1000 again. 552 */ 553 do_div(stddev, rec->counter * (rec->counter - 1) * 1000); 554 } 555 556 trace_seq_init(&s); 557 trace_print_graph_duration(rec->time, &s); 558 trace_seq_puts(&s, " "); 559 trace_print_graph_duration(avg, &s); 560 trace_seq_puts(&s, " "); 561 trace_print_graph_duration(stddev, &s); 562 trace_print_seq(m, &s); 563 #endif 564 seq_putc(m, '\n'); 565 out: 566 mutex_unlock(&ftrace_profile_lock); 567 568 return ret; 569 } 570 571 static void ftrace_profile_reset(struct ftrace_profile_stat *stat) 572 { 573 struct ftrace_profile_page *pg; 574 575 pg = stat->pages = stat->start; 576 577 while (pg) { 578 memset(pg->records, 0, PROFILE_RECORDS_SIZE); 579 pg->index = 0; 580 pg = pg->next; 581 } 582 583 memset(stat->hash, 0, 584 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head)); 585 } 586 587 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) 588 { 589 struct ftrace_profile_page *pg; 590 int functions; 591 int pages; 592 int i; 593 594 /* If we already allocated, do nothing */ 595 if (stat->pages) 596 return 0; 597 598 stat->pages = (void *)get_zeroed_page(GFP_KERNEL); 599 if (!stat->pages) 600 return -ENOMEM; 601 602 #ifdef CONFIG_DYNAMIC_FTRACE 603 functions = ftrace_update_tot_cnt; 604 #else 605 /* 606 * We do not know the number of functions that exist because 607 * dynamic tracing is what counts them. With past experience 608 * we have around 20K functions. 
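	 * (With 4 KiB pages that is on the order of a hundred records per
	 * page and a couple of hundred pages per CPU -- a rough estimate
	 * only, the exact figure depends on sizeof(struct ftrace_profile).)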
That should be more than enough. 609 * It is highly unlikely we will execute every function in 610 * the kernel. 611 */ 612 functions = 20000; 613 #endif 614 615 pg = stat->start = stat->pages; 616 617 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE); 618 619 for (i = 1; i < pages; i++) { 620 pg->next = (void *)get_zeroed_page(GFP_KERNEL); 621 if (!pg->next) 622 goto out_free; 623 pg = pg->next; 624 } 625 626 return 0; 627 628 out_free: 629 pg = stat->start; 630 while (pg) { 631 unsigned long tmp = (unsigned long)pg; 632 633 pg = pg->next; 634 free_page(tmp); 635 } 636 637 stat->pages = NULL; 638 stat->start = NULL; 639 640 return -ENOMEM; 641 } 642 643 static int ftrace_profile_init_cpu(int cpu) 644 { 645 struct ftrace_profile_stat *stat; 646 int size; 647 648 stat = &per_cpu(ftrace_profile_stats, cpu); 649 650 if (stat->hash) { 651 /* If the profile is already created, simply reset it */ 652 ftrace_profile_reset(stat); 653 return 0; 654 } 655 656 /* 657 * We are profiling all functions, but usually only a few thousand 658 * functions are hit. We'll make a hash of 1024 items. 659 */ 660 size = FTRACE_PROFILE_HASH_SIZE; 661 662 stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL); 663 664 if (!stat->hash) 665 return -ENOMEM; 666 667 /* Preallocate the function profiling pages */ 668 if (ftrace_profile_pages_init(stat) < 0) { 669 kfree(stat->hash); 670 stat->hash = NULL; 671 return -ENOMEM; 672 } 673 674 return 0; 675 } 676 677 static int ftrace_profile_init(void) 678 { 679 int cpu; 680 int ret = 0; 681 682 for_each_possible_cpu(cpu) { 683 ret = ftrace_profile_init_cpu(cpu); 684 if (ret) 685 break; 686 } 687 688 return ret; 689 } 690 691 /* interrupts must be disabled */ 692 static struct ftrace_profile * 693 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip) 694 { 695 struct ftrace_profile *rec; 696 struct hlist_head *hhd; 697 unsigned long key; 698 699 key = hash_long(ip, FTRACE_PROFILE_HASH_BITS); 700 hhd = &stat->hash[key]; 701 702 if (hlist_empty(hhd)) 703 return NULL; 704 705 hlist_for_each_entry_rcu_notrace(rec, hhd, node) { 706 if (rec->ip == ip) 707 return rec; 708 } 709 710 return NULL; 711 } 712 713 static void ftrace_add_profile(struct ftrace_profile_stat *stat, 714 struct ftrace_profile *rec) 715 { 716 unsigned long key; 717 718 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS); 719 hlist_add_head_rcu(&rec->node, &stat->hash[key]); 720 } 721 722 /* 723 * The memory is already allocated, this simply finds a new record to use. 
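 * The caller already runs with interrupts disabled; the atomic "disabled"
 * counter below only guards against re-entry from NMI context, it is not
 * a general purpose lock.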
724 */ 725 static struct ftrace_profile * 726 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip) 727 { 728 struct ftrace_profile *rec = NULL; 729 730 /* prevent recursion (from NMIs) */ 731 if (atomic_inc_return(&stat->disabled) != 1) 732 goto out; 733 734 /* 735 * Try to find the function again since an NMI 736 * could have added it 737 */ 738 rec = ftrace_find_profiled_func(stat, ip); 739 if (rec) 740 goto out; 741 742 if (stat->pages->index == PROFILES_PER_PAGE) { 743 if (!stat->pages->next) 744 goto out; 745 stat->pages = stat->pages->next; 746 } 747 748 rec = &stat->pages->records[stat->pages->index++]; 749 rec->ip = ip; 750 ftrace_add_profile(stat, rec); 751 752 out: 753 atomic_dec(&stat->disabled); 754 755 return rec; 756 } 757 758 static void 759 function_profile_call(unsigned long ip, unsigned long parent_ip, 760 struct ftrace_ops *ops, struct pt_regs *regs) 761 { 762 struct ftrace_profile_stat *stat; 763 struct ftrace_profile *rec; 764 unsigned long flags; 765 766 if (!ftrace_profile_enabled) 767 return; 768 769 local_irq_save(flags); 770 771 stat = this_cpu_ptr(&ftrace_profile_stats); 772 if (!stat->hash || !ftrace_profile_enabled) 773 goto out; 774 775 rec = ftrace_find_profiled_func(stat, ip); 776 if (!rec) { 777 rec = ftrace_profile_alloc(stat, ip); 778 if (!rec) 779 goto out; 780 } 781 782 rec->counter++; 783 out: 784 local_irq_restore(flags); 785 } 786 787 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 788 static bool fgraph_graph_time = true; 789 790 void ftrace_graph_graph_time_control(bool enable) 791 { 792 fgraph_graph_time = enable; 793 } 794 795 static int profile_graph_entry(struct ftrace_graph_ent *trace) 796 { 797 struct ftrace_ret_stack *ret_stack; 798 799 function_profile_call(trace->func, 0, NULL, NULL); 800 801 /* If function graph is shutting down, ret_stack can be NULL */ 802 if (!current->ret_stack) 803 return 0; 804 805 ret_stack = ftrace_graph_get_ret_stack(current, 0); 806 if (ret_stack) 807 ret_stack->subtime = 0; 808 809 return 1; 810 } 811 812 static void profile_graph_return(struct ftrace_graph_ret *trace) 813 { 814 struct ftrace_ret_stack *ret_stack; 815 struct ftrace_profile_stat *stat; 816 unsigned long long calltime; 817 struct ftrace_profile *rec; 818 unsigned long flags; 819 820 local_irq_save(flags); 821 stat = this_cpu_ptr(&ftrace_profile_stats); 822 if (!stat->hash || !ftrace_profile_enabled) 823 goto out; 824 825 /* If the calltime was zero'd ignore it */ 826 if (!trace->calltime) 827 goto out; 828 829 calltime = trace->rettime - trace->calltime; 830 831 if (!fgraph_graph_time) { 832 833 /* Append this call time to the parent time to subtract */ 834 ret_stack = ftrace_graph_get_ret_stack(current, 1); 835 if (ret_stack) 836 ret_stack->subtime += calltime; 837 838 ret_stack = ftrace_graph_get_ret_stack(current, 0); 839 if (ret_stack && ret_stack->subtime < calltime) 840 calltime -= ret_stack->subtime; 841 else 842 calltime = 0; 843 } 844 845 rec = ftrace_find_profiled_func(stat, trace->func); 846 if (rec) { 847 rec->time += calltime; 848 rec->time_squared += calltime * calltime; 849 } 850 851 out: 852 local_irq_restore(flags); 853 } 854 855 static struct fgraph_ops fprofiler_ops = { 856 .entryfunc = &profile_graph_entry, 857 .retfunc = &profile_graph_return, 858 }; 859 860 static int register_ftrace_profiler(void) 861 { 862 return register_ftrace_graph(&fprofiler_ops); 863 } 864 865 static void unregister_ftrace_profiler(void) 866 { 867 unregister_ftrace_graph(&fprofiler_ops); 868 } 869 #else 870 static struct ftrace_ops 
ftrace_profile_ops __read_mostly = { 871 .func = function_profile_call, 872 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, 873 INIT_OPS_HASH(ftrace_profile_ops) 874 }; 875 876 static int register_ftrace_profiler(void) 877 { 878 return register_ftrace_function(&ftrace_profile_ops); 879 } 880 881 static void unregister_ftrace_profiler(void) 882 { 883 unregister_ftrace_function(&ftrace_profile_ops); 884 } 885 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 886 887 static ssize_t 888 ftrace_profile_write(struct file *filp, const char __user *ubuf, 889 size_t cnt, loff_t *ppos) 890 { 891 unsigned long val; 892 int ret; 893 894 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 895 if (ret) 896 return ret; 897 898 val = !!val; 899 900 mutex_lock(&ftrace_profile_lock); 901 if (ftrace_profile_enabled ^ val) { 902 if (val) { 903 ret = ftrace_profile_init(); 904 if (ret < 0) { 905 cnt = ret; 906 goto out; 907 } 908 909 ret = register_ftrace_profiler(); 910 if (ret < 0) { 911 cnt = ret; 912 goto out; 913 } 914 ftrace_profile_enabled = 1; 915 } else { 916 ftrace_profile_enabled = 0; 917 /* 918 * unregister_ftrace_profiler calls stop_machine 919 * so this acts like an synchronize_rcu. 920 */ 921 unregister_ftrace_profiler(); 922 } 923 } 924 out: 925 mutex_unlock(&ftrace_profile_lock); 926 927 *ppos += cnt; 928 929 return cnt; 930 } 931 932 static ssize_t 933 ftrace_profile_read(struct file *filp, char __user *ubuf, 934 size_t cnt, loff_t *ppos) 935 { 936 char buf[64]; /* big enough to hold a number */ 937 int r; 938 939 r = sprintf(buf, "%u\n", ftrace_profile_enabled); 940 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 941 } 942 943 static const struct file_operations ftrace_profile_fops = { 944 .open = tracing_open_generic, 945 .read = ftrace_profile_read, 946 .write = ftrace_profile_write, 947 .llseek = default_llseek, 948 }; 949 950 /* used to initialize the real stat files */ 951 static struct tracer_stat function_stats __initdata = { 952 .name = "functions", 953 .stat_start = function_stat_start, 954 .stat_next = function_stat_next, 955 .stat_cmp = function_stat_cmp, 956 .stat_headers = function_stat_headers, 957 .stat_show = function_stat_show 958 }; 959 960 static __init void ftrace_profile_tracefs(struct dentry *d_tracer) 961 { 962 struct ftrace_profile_stat *stat; 963 struct dentry *entry; 964 char *name; 965 int ret; 966 int cpu; 967 968 for_each_possible_cpu(cpu) { 969 stat = &per_cpu(ftrace_profile_stats, cpu); 970 971 name = kasprintf(GFP_KERNEL, "function%d", cpu); 972 if (!name) { 973 /* 974 * The files created are permanent, if something happens 975 * we still do not free memory. 976 */ 977 WARN(1, 978 "Could not allocate stat file for cpu %d\n", 979 cpu); 980 return; 981 } 982 stat->stat = function_stats; 983 stat->stat.name = name; 984 ret = register_stat_tracer(&stat->stat); 985 if (ret) { 986 WARN(1, 987 "Could not register function stat for cpu %d\n", 988 cpu); 989 kfree(name); 990 return; 991 } 992 } 993 994 entry = tracefs_create_file("function_profile_enabled", 0644, 995 d_tracer, NULL, &ftrace_profile_fops); 996 if (!entry) 997 pr_warn("Could not create tracefs 'function_profile_enabled' entry\n"); 998 } 999 1000 #else /* CONFIG_FUNCTION_PROFILER */ 1001 static __init void ftrace_profile_tracefs(struct dentry *d_tracer) 1002 { 1003 } 1004 #endif /* CONFIG_FUNCTION_PROFILER */ 1005 1006 #ifdef CONFIG_DYNAMIC_FTRACE 1007 1008 static struct ftrace_ops *removed_ops; 1009 1010 /* 1011 * Set when doing a global update, like enabling all recs or disabling them. 
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
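 *
 * A rough sketch of the kind of check a text-address helper performs
 * (illustrative only):
 *
 *	if (core_kernel_text(addr) || is_module_text_address(addr) ||
 *	    is_ftrace_trampoline(addr))
 *		return true;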
1092 */ 1093 bool is_ftrace_trampoline(unsigned long addr) 1094 { 1095 return ftrace_ops_trampoline(addr) != NULL; 1096 } 1097 1098 struct ftrace_page { 1099 struct ftrace_page *next; 1100 struct dyn_ftrace *records; 1101 int index; 1102 int size; 1103 }; 1104 1105 #define ENTRY_SIZE sizeof(struct dyn_ftrace) 1106 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE) 1107 1108 /* estimate from running different kernels */ 1109 #define NR_TO_INIT 10000 1110 1111 static struct ftrace_page *ftrace_pages_start; 1112 static struct ftrace_page *ftrace_pages; 1113 1114 static __always_inline unsigned long 1115 ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip) 1116 { 1117 if (hash->size_bits > 0) 1118 return hash_long(ip, hash->size_bits); 1119 1120 return 0; 1121 } 1122 1123 /* Only use this function if ftrace_hash_empty() has already been tested */ 1124 static __always_inline struct ftrace_func_entry * 1125 __ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) 1126 { 1127 unsigned long key; 1128 struct ftrace_func_entry *entry; 1129 struct hlist_head *hhd; 1130 1131 key = ftrace_hash_key(hash, ip); 1132 hhd = &hash->buckets[key]; 1133 1134 hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) { 1135 if (entry->ip == ip) 1136 return entry; 1137 } 1138 return NULL; 1139 } 1140 1141 /** 1142 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash 1143 * @hash: The hash to look at 1144 * @ip: The instruction pointer to test 1145 * 1146 * Search a given @hash to see if a given instruction pointer (@ip) 1147 * exists in it. 1148 * 1149 * Returns the entry that holds the @ip if found. NULL otherwise. 1150 */ 1151 struct ftrace_func_entry * 1152 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) 1153 { 1154 if (ftrace_hash_empty(hash)) 1155 return NULL; 1156 1157 return __ftrace_lookup_ip(hash, ip); 1158 } 1159 1160 static void __add_hash_entry(struct ftrace_hash *hash, 1161 struct ftrace_func_entry *entry) 1162 { 1163 struct hlist_head *hhd; 1164 unsigned long key; 1165 1166 key = ftrace_hash_key(hash, entry->ip); 1167 hhd = &hash->buckets[key]; 1168 hlist_add_head(&entry->hlist, hhd); 1169 hash->count++; 1170 } 1171 1172 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) 1173 { 1174 struct ftrace_func_entry *entry; 1175 1176 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 1177 if (!entry) 1178 return -ENOMEM; 1179 1180 entry->ip = ip; 1181 __add_hash_entry(hash, entry); 1182 1183 return 0; 1184 } 1185 1186 static void 1187 free_hash_entry(struct ftrace_hash *hash, 1188 struct ftrace_func_entry *entry) 1189 { 1190 hlist_del(&entry->hlist); 1191 kfree(entry); 1192 hash->count--; 1193 } 1194 1195 static void 1196 remove_hash_entry(struct ftrace_hash *hash, 1197 struct ftrace_func_entry *entry) 1198 { 1199 hlist_del_rcu(&entry->hlist); 1200 hash->count--; 1201 } 1202 1203 static void ftrace_hash_clear(struct ftrace_hash *hash) 1204 { 1205 struct hlist_head *hhd; 1206 struct hlist_node *tn; 1207 struct ftrace_func_entry *entry; 1208 int size = 1 << hash->size_bits; 1209 int i; 1210 1211 if (!hash->count) 1212 return; 1213 1214 for (i = 0; i < size; i++) { 1215 hhd = &hash->buckets[i]; 1216 hlist_for_each_entry_safe(entry, tn, hhd, hlist) 1217 free_hash_entry(hash, entry); 1218 } 1219 FTRACE_WARN_ON(hash->count); 1220 } 1221 1222 static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod) 1223 { 1224 list_del(&ftrace_mod->list); 1225 kfree(ftrace_mod->module); 1226 kfree(ftrace_mod->func); 1227 kfree(ftrace_mod); 1228 } 1229 1230 static void 
clear_ftrace_mod_list(struct list_head *head) 1231 { 1232 struct ftrace_mod_load *p, *n; 1233 1234 /* stack tracer isn't supported yet */ 1235 if (!head) 1236 return; 1237 1238 mutex_lock(&ftrace_lock); 1239 list_for_each_entry_safe(p, n, head, list) 1240 free_ftrace_mod(p); 1241 mutex_unlock(&ftrace_lock); 1242 } 1243 1244 static void free_ftrace_hash(struct ftrace_hash *hash) 1245 { 1246 if (!hash || hash == EMPTY_HASH) 1247 return; 1248 ftrace_hash_clear(hash); 1249 kfree(hash->buckets); 1250 kfree(hash); 1251 } 1252 1253 static void __free_ftrace_hash_rcu(struct rcu_head *rcu) 1254 { 1255 struct ftrace_hash *hash; 1256 1257 hash = container_of(rcu, struct ftrace_hash, rcu); 1258 free_ftrace_hash(hash); 1259 } 1260 1261 static void free_ftrace_hash_rcu(struct ftrace_hash *hash) 1262 { 1263 if (!hash || hash == EMPTY_HASH) 1264 return; 1265 call_rcu(&hash->rcu, __free_ftrace_hash_rcu); 1266 } 1267 1268 void ftrace_free_filter(struct ftrace_ops *ops) 1269 { 1270 ftrace_ops_init(ops); 1271 free_ftrace_hash(ops->func_hash->filter_hash); 1272 free_ftrace_hash(ops->func_hash->notrace_hash); 1273 } 1274 1275 static struct ftrace_hash *alloc_ftrace_hash(int size_bits) 1276 { 1277 struct ftrace_hash *hash; 1278 int size; 1279 1280 hash = kzalloc(sizeof(*hash), GFP_KERNEL); 1281 if (!hash) 1282 return NULL; 1283 1284 size = 1 << size_bits; 1285 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL); 1286 1287 if (!hash->buckets) { 1288 kfree(hash); 1289 return NULL; 1290 } 1291 1292 hash->size_bits = size_bits; 1293 1294 return hash; 1295 } 1296 1297 1298 static int ftrace_add_mod(struct trace_array *tr, 1299 const char *func, const char *module, 1300 int enable) 1301 { 1302 struct ftrace_mod_load *ftrace_mod; 1303 struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace; 1304 1305 ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL); 1306 if (!ftrace_mod) 1307 return -ENOMEM; 1308 1309 ftrace_mod->func = kstrdup(func, GFP_KERNEL); 1310 ftrace_mod->module = kstrdup(module, GFP_KERNEL); 1311 ftrace_mod->enable = enable; 1312 1313 if (!ftrace_mod->func || !ftrace_mod->module) 1314 goto out_free; 1315 1316 list_add(&ftrace_mod->list, mod_head); 1317 1318 return 0; 1319 1320 out_free: 1321 free_ftrace_mod(ftrace_mod); 1322 1323 return -ENOMEM; 1324 } 1325 1326 static struct ftrace_hash * 1327 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) 1328 { 1329 struct ftrace_func_entry *entry; 1330 struct ftrace_hash *new_hash; 1331 int size; 1332 int ret; 1333 int i; 1334 1335 new_hash = alloc_ftrace_hash(size_bits); 1336 if (!new_hash) 1337 return NULL; 1338 1339 if (hash) 1340 new_hash->flags = hash->flags; 1341 1342 /* Empty hash? 
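	 * If so there is nothing to copy; the freshly allocated (and still
	 * empty) hash can be returned as-is.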
*/ 1343 if (ftrace_hash_empty(hash)) 1344 return new_hash; 1345 1346 size = 1 << hash->size_bits; 1347 for (i = 0; i < size; i++) { 1348 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 1349 ret = add_hash_entry(new_hash, entry->ip); 1350 if (ret < 0) 1351 goto free_hash; 1352 } 1353 } 1354 1355 FTRACE_WARN_ON(new_hash->count != hash->count); 1356 1357 return new_hash; 1358 1359 free_hash: 1360 free_ftrace_hash(new_hash); 1361 return NULL; 1362 } 1363 1364 static void 1365 ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash); 1366 static void 1367 ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash); 1368 1369 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, 1370 struct ftrace_hash *new_hash); 1371 1372 static struct ftrace_hash * 1373 __ftrace_hash_move(struct ftrace_hash *src) 1374 { 1375 struct ftrace_func_entry *entry; 1376 struct hlist_node *tn; 1377 struct hlist_head *hhd; 1378 struct ftrace_hash *new_hash; 1379 int size = src->count; 1380 int bits = 0; 1381 int i; 1382 1383 /* 1384 * If the new source is empty, just return the empty_hash. 1385 */ 1386 if (ftrace_hash_empty(src)) 1387 return EMPTY_HASH; 1388 1389 /* 1390 * Make the hash size about 1/2 the # found 1391 */ 1392 for (size /= 2; size; size >>= 1) 1393 bits++; 1394 1395 /* Don't allocate too much */ 1396 if (bits > FTRACE_HASH_MAX_BITS) 1397 bits = FTRACE_HASH_MAX_BITS; 1398 1399 new_hash = alloc_ftrace_hash(bits); 1400 if (!new_hash) 1401 return NULL; 1402 1403 new_hash->flags = src->flags; 1404 1405 size = 1 << src->size_bits; 1406 for (i = 0; i < size; i++) { 1407 hhd = &src->buckets[i]; 1408 hlist_for_each_entry_safe(entry, tn, hhd, hlist) { 1409 remove_hash_entry(src, entry); 1410 __add_hash_entry(new_hash, entry); 1411 } 1412 } 1413 1414 return new_hash; 1415 } 1416 1417 static int 1418 ftrace_hash_move(struct ftrace_ops *ops, int enable, 1419 struct ftrace_hash **dst, struct ftrace_hash *src) 1420 { 1421 struct ftrace_hash *new_hash; 1422 int ret; 1423 1424 /* Reject setting notrace hash on IPMODIFY ftrace_ops */ 1425 if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable) 1426 return -EINVAL; 1427 1428 new_hash = __ftrace_hash_move(src); 1429 if (!new_hash) 1430 return -ENOMEM; 1431 1432 /* Make sure this can be applied if it is IPMODIFY ftrace_ops */ 1433 if (enable) { 1434 /* IPMODIFY should be updated only when filter_hash updating */ 1435 ret = ftrace_hash_ipmodify_update(ops, new_hash); 1436 if (ret < 0) { 1437 free_ftrace_hash(new_hash); 1438 return ret; 1439 } 1440 } 1441 1442 /* 1443 * Remove the current set, update the hash and add 1444 * them back. 1445 */ 1446 ftrace_hash_rec_disable_modify(ops, enable); 1447 1448 rcu_assign_pointer(*dst, new_hash); 1449 1450 ftrace_hash_rec_enable_modify(ops, enable); 1451 1452 return 0; 1453 } 1454 1455 static bool hash_contains_ip(unsigned long ip, 1456 struct ftrace_ops_hash *hash) 1457 { 1458 /* 1459 * The function record is a match if it exists in the filter 1460 * hash and not in the notrace hash. Note, an emty hash is 1461 * considered a match for the filter hash, but an empty 1462 * notrace hash is considered not in the notrace hash. 1463 */ 1464 return (ftrace_hash_empty(hash->filter_hash) || 1465 __ftrace_lookup_ip(hash->filter_hash, ip)) && 1466 (ftrace_hash_empty(hash->notrace_hash) || 1467 !__ftrace_lookup_ip(hash->notrace_hash, ip)); 1468 } 1469 1470 /* 1471 * Test the hashes for this ops to see if we want to call 1472 * the ops->func or not. 
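 * (Concrete example: an empty filter_hash combined with a notrace_hash
 * that contains only "schedule" means every function except schedule()
 * is considered a match.)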
1473 * 1474 * It's a match if the ip is in the ops->filter_hash or 1475 * the filter_hash does not exist or is empty, 1476 * AND 1477 * the ip is not in the ops->notrace_hash. 1478 * 1479 * This needs to be called with preemption disabled as 1480 * the hashes are freed with call_rcu(). 1481 */ 1482 int 1483 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) 1484 { 1485 struct ftrace_ops_hash hash; 1486 int ret; 1487 1488 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS 1489 /* 1490 * There's a small race when adding ops that the ftrace handler 1491 * that wants regs, may be called without them. We can not 1492 * allow that handler to be called if regs is NULL. 1493 */ 1494 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS)) 1495 return 0; 1496 #endif 1497 1498 rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash); 1499 rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash); 1500 1501 if (hash_contains_ip(ip, &hash)) 1502 ret = 1; 1503 else 1504 ret = 0; 1505 1506 return ret; 1507 } 1508 1509 /* 1510 * This is a double for. Do not use 'break' to break out of the loop, 1511 * you must use a goto. 1512 */ 1513 #define do_for_each_ftrace_rec(pg, rec) \ 1514 for (pg = ftrace_pages_start; pg; pg = pg->next) { \ 1515 int _____i; \ 1516 for (_____i = 0; _____i < pg->index; _____i++) { \ 1517 rec = &pg->records[_____i]; 1518 1519 #define while_for_each_ftrace_rec() \ 1520 } \ 1521 } 1522 1523 1524 static int ftrace_cmp_recs(const void *a, const void *b) 1525 { 1526 const struct dyn_ftrace *key = a; 1527 const struct dyn_ftrace *rec = b; 1528 1529 if (key->flags < rec->ip) 1530 return -1; 1531 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE) 1532 return 1; 1533 return 0; 1534 } 1535 1536 /** 1537 * ftrace_location_range - return the first address of a traced location 1538 * if it touches the given ip range 1539 * @start: start of range to search. 1540 * @end: end of range to search (inclusive). @end points to the last byte 1541 * to check. 1542 * 1543 * Returns rec->ip if the related ftrace location is a least partly within 1544 * the given address range. That is, the first address of the instruction 1545 * that is either a NOP or call to the function tracer. It checks the ftrace 1546 * internal tables to determine if the address belongs or not. 1547 */ 1548 unsigned long ftrace_location_range(unsigned long start, unsigned long end) 1549 { 1550 struct ftrace_page *pg; 1551 struct dyn_ftrace *rec; 1552 struct dyn_ftrace key; 1553 1554 key.ip = start; 1555 key.flags = end; /* overload flags, as it is unsigned long */ 1556 1557 for (pg = ftrace_pages_start; pg; pg = pg->next) { 1558 if (end < pg->records[0].ip || 1559 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) 1560 continue; 1561 rec = bsearch(&key, pg->records, pg->index, 1562 sizeof(struct dyn_ftrace), 1563 ftrace_cmp_recs); 1564 if (rec) 1565 return rec->ip; 1566 } 1567 1568 return 0; 1569 } 1570 1571 /** 1572 * ftrace_location - return true if the ip giving is a traced location 1573 * @ip: the instruction pointer to check 1574 * 1575 * Returns rec->ip if @ip given is a pointer to a ftrace location. 1576 * That is, the instruction that is either a NOP or call to 1577 * the function tracer. It checks the ftrace internal tables to 1578 * determine if the address belongs or not. 
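 *
 * kprobes, for example, uses this to decide whether a probe address sits
 * on an ftrace call site and can therefore be handled through ftrace
 * instead of a breakpoint instruction.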
1579 */ 1580 unsigned long ftrace_location(unsigned long ip) 1581 { 1582 return ftrace_location_range(ip, ip); 1583 } 1584 1585 /** 1586 * ftrace_text_reserved - return true if range contains an ftrace location 1587 * @start: start of range to search 1588 * @end: end of range to search (inclusive). @end points to the last byte to check. 1589 * 1590 * Returns 1 if @start and @end contains a ftrace location. 1591 * That is, the instruction that is either a NOP or call to 1592 * the function tracer. It checks the ftrace internal tables to 1593 * determine if the address belongs or not. 1594 */ 1595 int ftrace_text_reserved(const void *start, const void *end) 1596 { 1597 unsigned long ret; 1598 1599 ret = ftrace_location_range((unsigned long)start, 1600 (unsigned long)end); 1601 1602 return (int)!!ret; 1603 } 1604 1605 /* Test if ops registered to this rec needs regs */ 1606 static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec) 1607 { 1608 struct ftrace_ops *ops; 1609 bool keep_regs = false; 1610 1611 for (ops = ftrace_ops_list; 1612 ops != &ftrace_list_end; ops = ops->next) { 1613 /* pass rec in as regs to have non-NULL val */ 1614 if (ftrace_ops_test(ops, rec->ip, rec)) { 1615 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { 1616 keep_regs = true; 1617 break; 1618 } 1619 } 1620 } 1621 1622 return keep_regs; 1623 } 1624 1625 static struct ftrace_ops * 1626 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec); 1627 static struct ftrace_ops * 1628 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops); 1629 1630 static bool __ftrace_hash_rec_update(struct ftrace_ops *ops, 1631 int filter_hash, 1632 bool inc) 1633 { 1634 struct ftrace_hash *hash; 1635 struct ftrace_hash *other_hash; 1636 struct ftrace_page *pg; 1637 struct dyn_ftrace *rec; 1638 bool update = false; 1639 int count = 0; 1640 int all = false; 1641 1642 /* Only update if the ops has been registered */ 1643 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) 1644 return false; 1645 1646 /* 1647 * In the filter_hash case: 1648 * If the count is zero, we update all records. 1649 * Otherwise we just update the items in the hash. 1650 * 1651 * In the notrace_hash case: 1652 * We enable the update in the hash. 1653 * As disabling notrace means enabling the tracing, 1654 * and enabling notrace means disabling, the inc variable 1655 * gets inversed. 1656 */ 1657 if (filter_hash) { 1658 hash = ops->func_hash->filter_hash; 1659 other_hash = ops->func_hash->notrace_hash; 1660 if (ftrace_hash_empty(hash)) 1661 all = true; 1662 } else { 1663 inc = !inc; 1664 hash = ops->func_hash->notrace_hash; 1665 other_hash = ops->func_hash->filter_hash; 1666 /* 1667 * If the notrace hash has no items, 1668 * then there's nothing to do. 1669 */ 1670 if (ftrace_hash_empty(hash)) 1671 return false; 1672 } 1673 1674 do_for_each_ftrace_rec(pg, rec) { 1675 int in_other_hash = 0; 1676 int in_hash = 0; 1677 int match = 0; 1678 1679 if (rec->flags & FTRACE_FL_DISABLED) 1680 continue; 1681 1682 if (all) { 1683 /* 1684 * Only the filter_hash affects all records. 1685 * Update if the record is not in the notrace hash. 1686 */ 1687 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip)) 1688 match = 1; 1689 } else { 1690 in_hash = !!ftrace_lookup_ip(hash, rec->ip); 1691 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip); 1692 1693 /* 1694 * If filter_hash is set, we want to match all functions 1695 * that are in the hash but not in the other hash. 1696 * 1697 * If filter_hash is not set, then we are decrementing. 
1698 * That means we match anything that is in the hash 1699 * and also in the other_hash. That is, we need to turn 1700 * off functions in the other hash because they are disabled 1701 * by this hash. 1702 */ 1703 if (filter_hash && in_hash && !in_other_hash) 1704 match = 1; 1705 else if (!filter_hash && in_hash && 1706 (in_other_hash || ftrace_hash_empty(other_hash))) 1707 match = 1; 1708 } 1709 if (!match) 1710 continue; 1711 1712 if (inc) { 1713 rec->flags++; 1714 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX)) 1715 return false; 1716 1717 /* 1718 * If there's only a single callback registered to a 1719 * function, and the ops has a trampoline registered 1720 * for it, then we can call it directly. 1721 */ 1722 if (ftrace_rec_count(rec) == 1 && ops->trampoline) 1723 rec->flags |= FTRACE_FL_TRAMP; 1724 else 1725 /* 1726 * If we are adding another function callback 1727 * to this function, and the previous had a 1728 * custom trampoline in use, then we need to go 1729 * back to the default trampoline. 1730 */ 1731 rec->flags &= ~FTRACE_FL_TRAMP; 1732 1733 /* 1734 * If any ops wants regs saved for this function 1735 * then all ops will get saved regs. 1736 */ 1737 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) 1738 rec->flags |= FTRACE_FL_REGS; 1739 } else { 1740 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0)) 1741 return false; 1742 rec->flags--; 1743 1744 /* 1745 * If the rec had REGS enabled and the ops that is 1746 * being removed had REGS set, then see if there is 1747 * still any ops for this record that wants regs. 1748 * If not, we can stop recording them. 1749 */ 1750 if (ftrace_rec_count(rec) > 0 && 1751 rec->flags & FTRACE_FL_REGS && 1752 ops->flags & FTRACE_OPS_FL_SAVE_REGS) { 1753 if (!test_rec_ops_needs_regs(rec)) 1754 rec->flags &= ~FTRACE_FL_REGS; 1755 } 1756 1757 /* 1758 * The TRAMP needs to be set only if rec count 1759 * is decremented to one, and the ops that is 1760 * left has a trampoline. As TRAMP can only be 1761 * enabled if there is only a single ops attached 1762 * to it. 1763 */ 1764 if (ftrace_rec_count(rec) == 1 && 1765 ftrace_find_tramp_ops_any(rec)) 1766 rec->flags |= FTRACE_FL_TRAMP; 1767 else 1768 rec->flags &= ~FTRACE_FL_TRAMP; 1769 1770 /* 1771 * flags will be cleared in ftrace_check_record() 1772 * if rec count is zero. 1773 */ 1774 } 1775 count++; 1776 1777 /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */ 1778 update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE; 1779 1780 /* Shortcut, if we handled all records, we are done. */ 1781 if (!all && count == hash->count) 1782 return update; 1783 } while_for_each_ftrace_rec(); 1784 1785 return update; 1786 } 1787 1788 static bool ftrace_hash_rec_disable(struct ftrace_ops *ops, 1789 int filter_hash) 1790 { 1791 return __ftrace_hash_rec_update(ops, filter_hash, 0); 1792 } 1793 1794 static bool ftrace_hash_rec_enable(struct ftrace_ops *ops, 1795 int filter_hash) 1796 { 1797 return __ftrace_hash_rec_update(ops, filter_hash, 1); 1798 } 1799 1800 static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops, 1801 int filter_hash, int inc) 1802 { 1803 struct ftrace_ops *op; 1804 1805 __ftrace_hash_rec_update(ops, filter_hash, inc); 1806 1807 if (ops->func_hash != &global_ops.local_hash) 1808 return; 1809 1810 /* 1811 * If the ops shares the global_ops hash, then we need to update 1812 * all ops that are enabled and use this hash. 
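	 * (This is what happens when, for instance, the tracefs
	 * set_ftrace_filter file changes the global filter: every other
	 * registered ops whose func_hash points at global_ops.local_hash
	 * must have its records recounted as well.)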
1813 */ 1814 do_for_each_ftrace_op(op, ftrace_ops_list) { 1815 /* Already done */ 1816 if (op == ops) 1817 continue; 1818 if (op->func_hash == &global_ops.local_hash) 1819 __ftrace_hash_rec_update(op, filter_hash, inc); 1820 } while_for_each_ftrace_op(op); 1821 } 1822 1823 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, 1824 int filter_hash) 1825 { 1826 ftrace_hash_rec_update_modify(ops, filter_hash, 0); 1827 } 1828 1829 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, 1830 int filter_hash) 1831 { 1832 ftrace_hash_rec_update_modify(ops, filter_hash, 1); 1833 } 1834 1835 /* 1836 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK 1837 * or no-needed to update, -EBUSY if it detects a conflict of the flag 1838 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs. 1839 * Note that old_hash and new_hash has below meanings 1840 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected) 1841 * - If the hash is EMPTY_HASH, it hits nothing 1842 * - Anything else hits the recs which match the hash entries. 1843 */ 1844 static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops, 1845 struct ftrace_hash *old_hash, 1846 struct ftrace_hash *new_hash) 1847 { 1848 struct ftrace_page *pg; 1849 struct dyn_ftrace *rec, *end = NULL; 1850 int in_old, in_new; 1851 1852 /* Only update if the ops has been registered */ 1853 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) 1854 return 0; 1855 1856 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) 1857 return 0; 1858 1859 /* 1860 * Since the IPMODIFY is a very address sensitive action, we do not 1861 * allow ftrace_ops to set all functions to new hash. 1862 */ 1863 if (!new_hash || !old_hash) 1864 return -EINVAL; 1865 1866 /* Update rec->flags */ 1867 do_for_each_ftrace_rec(pg, rec) { 1868 1869 if (rec->flags & FTRACE_FL_DISABLED) 1870 continue; 1871 1872 /* We need to update only differences of filter_hash */ 1873 in_old = !!ftrace_lookup_ip(old_hash, rec->ip); 1874 in_new = !!ftrace_lookup_ip(new_hash, rec->ip); 1875 if (in_old == in_new) 1876 continue; 1877 1878 if (in_new) { 1879 /* New entries must ensure no others are using it */ 1880 if (rec->flags & FTRACE_FL_IPMODIFY) 1881 goto rollback; 1882 rec->flags |= FTRACE_FL_IPMODIFY; 1883 } else /* Removed entry */ 1884 rec->flags &= ~FTRACE_FL_IPMODIFY; 1885 } while_for_each_ftrace_rec(); 1886 1887 return 0; 1888 1889 rollback: 1890 end = rec; 1891 1892 /* Roll back what we did above */ 1893 do_for_each_ftrace_rec(pg, rec) { 1894 1895 if (rec->flags & FTRACE_FL_DISABLED) 1896 continue; 1897 1898 if (rec == end) 1899 goto err_out; 1900 1901 in_old = !!ftrace_lookup_ip(old_hash, rec->ip); 1902 in_new = !!ftrace_lookup_ip(new_hash, rec->ip); 1903 if (in_old == in_new) 1904 continue; 1905 1906 if (in_new) 1907 rec->flags &= ~FTRACE_FL_IPMODIFY; 1908 else 1909 rec->flags |= FTRACE_FL_IPMODIFY; 1910 } while_for_each_ftrace_rec(); 1911 1912 err_out: 1913 return -EBUSY; 1914 } 1915 1916 static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops) 1917 { 1918 struct ftrace_hash *hash = ops->func_hash->filter_hash; 1919 1920 if (ftrace_hash_empty(hash)) 1921 hash = NULL; 1922 1923 return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash); 1924 } 1925 1926 /* Disabling always succeeds */ 1927 static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops) 1928 { 1929 struct ftrace_hash *hash = ops->func_hash->filter_hash; 1930 1931 if (ftrace_hash_empty(hash)) 1932 hash = NULL; 1933 1934 __ftrace_hash_update_ipmodify(ops, hash, 
EMPTY_HASH); 1935 } 1936 1937 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, 1938 struct ftrace_hash *new_hash) 1939 { 1940 struct ftrace_hash *old_hash = ops->func_hash->filter_hash; 1941 1942 if (ftrace_hash_empty(old_hash)) 1943 old_hash = NULL; 1944 1945 if (ftrace_hash_empty(new_hash)) 1946 new_hash = NULL; 1947 1948 return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash); 1949 } 1950 1951 static void print_ip_ins(const char *fmt, const unsigned char *p) 1952 { 1953 int i; 1954 1955 printk(KERN_CONT "%s", fmt); 1956 1957 for (i = 0; i < MCOUNT_INSN_SIZE; i++) 1958 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); 1959 } 1960 1961 enum ftrace_bug_type ftrace_bug_type; 1962 const void *ftrace_expected; 1963 1964 static void print_bug_type(void) 1965 { 1966 switch (ftrace_bug_type) { 1967 case FTRACE_BUG_UNKNOWN: 1968 break; 1969 case FTRACE_BUG_INIT: 1970 pr_info("Initializing ftrace call sites\n"); 1971 break; 1972 case FTRACE_BUG_NOP: 1973 pr_info("Setting ftrace call site to NOP\n"); 1974 break; 1975 case FTRACE_BUG_CALL: 1976 pr_info("Setting ftrace call site to call ftrace function\n"); 1977 break; 1978 case FTRACE_BUG_UPDATE: 1979 pr_info("Updating ftrace call site to call a different ftrace function\n"); 1980 break; 1981 } 1982 } 1983 1984 /** 1985 * ftrace_bug - report and shutdown function tracer 1986 * @failed: The failed type (EFAULT, EINVAL, EPERM) 1987 * @rec: The record that failed 1988 * 1989 * The arch code that enables or disables the function tracing 1990 * can call ftrace_bug() when it has detected a problem in 1991 * modifying the code. @failed should be one of either: 1992 * EFAULT - if the problem happens on reading the @ip address 1993 * EINVAL - if what is read at @ip is not what was expected 1994 * EPERM - if the problem happens on writing to the @ip address 1995 */ 1996 void ftrace_bug(int failed, struct dyn_ftrace *rec) 1997 { 1998 unsigned long ip = rec ? rec->ip : 0; 1999 2000 switch (failed) { 2001 case -EFAULT: 2002 FTRACE_WARN_ON_ONCE(1); 2003 pr_info("ftrace faulted on modifying "); 2004 print_ip_sym(ip); 2005 break; 2006 case -EINVAL: 2007 FTRACE_WARN_ON_ONCE(1); 2008 pr_info("ftrace failed to modify "); 2009 print_ip_sym(ip); 2010 print_ip_ins(" actual: ", (unsigned char *)ip); 2011 pr_cont("\n"); 2012 if (ftrace_expected) { 2013 print_ip_ins(" expected: ", ftrace_expected); 2014 pr_cont("\n"); 2015 } 2016 break; 2017 case -EPERM: 2018 FTRACE_WARN_ON_ONCE(1); 2019 pr_info("ftrace faulted on writing "); 2020 print_ip_sym(ip); 2021 break; 2022 default: 2023 FTRACE_WARN_ON_ONCE(1); 2024 pr_info("ftrace faulted on unknown error "); 2025 print_ip_sym(ip); 2026 } 2027 print_bug_type(); 2028 if (rec) { 2029 struct ftrace_ops *ops = NULL; 2030 2031 pr_info("ftrace record flags: %lx\n", rec->flags); 2032 pr_cont(" (%ld)%s", ftrace_rec_count(rec), 2033 rec->flags & FTRACE_FL_REGS ? 
" R" : " "); 2034 if (rec->flags & FTRACE_FL_TRAMP_EN) { 2035 ops = ftrace_find_tramp_ops_any(rec); 2036 if (ops) { 2037 do { 2038 pr_cont("\ttramp: %pS (%pS)", 2039 (void *)ops->trampoline, 2040 (void *)ops->func); 2041 ops = ftrace_find_tramp_ops_next(rec, ops); 2042 } while (ops); 2043 } else 2044 pr_cont("\ttramp: ERROR!"); 2045 2046 } 2047 ip = ftrace_get_addr_curr(rec); 2048 pr_cont("\n expected tramp: %lx\n", ip); 2049 } 2050 } 2051 2052 static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update) 2053 { 2054 unsigned long flag = 0UL; 2055 2056 ftrace_bug_type = FTRACE_BUG_UNKNOWN; 2057 2058 if (rec->flags & FTRACE_FL_DISABLED) 2059 return FTRACE_UPDATE_IGNORE; 2060 2061 /* 2062 * If we are updating calls: 2063 * 2064 * If the record has a ref count, then we need to enable it 2065 * because someone is using it. 2066 * 2067 * Otherwise we make sure its disabled. 2068 * 2069 * If we are disabling calls, then disable all records that 2070 * are enabled. 2071 */ 2072 if (enable && ftrace_rec_count(rec)) 2073 flag = FTRACE_FL_ENABLED; 2074 2075 /* 2076 * If enabling and the REGS flag does not match the REGS_EN, or 2077 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore 2078 * this record. Set flags to fail the compare against ENABLED. 2079 */ 2080 if (flag) { 2081 if (!(rec->flags & FTRACE_FL_REGS) != 2082 !(rec->flags & FTRACE_FL_REGS_EN)) 2083 flag |= FTRACE_FL_REGS; 2084 2085 if (!(rec->flags & FTRACE_FL_TRAMP) != 2086 !(rec->flags & FTRACE_FL_TRAMP_EN)) 2087 flag |= FTRACE_FL_TRAMP; 2088 } 2089 2090 /* If the state of this record hasn't changed, then do nothing */ 2091 if ((rec->flags & FTRACE_FL_ENABLED) == flag) 2092 return FTRACE_UPDATE_IGNORE; 2093 2094 if (flag) { 2095 /* Save off if rec is being enabled (for return value) */ 2096 flag ^= rec->flags & FTRACE_FL_ENABLED; 2097 2098 if (update) { 2099 rec->flags |= FTRACE_FL_ENABLED; 2100 if (flag & FTRACE_FL_REGS) { 2101 if (rec->flags & FTRACE_FL_REGS) 2102 rec->flags |= FTRACE_FL_REGS_EN; 2103 else 2104 rec->flags &= ~FTRACE_FL_REGS_EN; 2105 } 2106 if (flag & FTRACE_FL_TRAMP) { 2107 if (rec->flags & FTRACE_FL_TRAMP) 2108 rec->flags |= FTRACE_FL_TRAMP_EN; 2109 else 2110 rec->flags &= ~FTRACE_FL_TRAMP_EN; 2111 } 2112 } 2113 2114 /* 2115 * If this record is being updated from a nop, then 2116 * return UPDATE_MAKE_CALL. 2117 * Otherwise, 2118 * return UPDATE_MODIFY_CALL to tell the caller to convert 2119 * from the save regs, to a non-save regs function or 2120 * vice versa, or from a trampoline call. 2121 */ 2122 if (flag & FTRACE_FL_ENABLED) { 2123 ftrace_bug_type = FTRACE_BUG_CALL; 2124 return FTRACE_UPDATE_MAKE_CALL; 2125 } 2126 2127 ftrace_bug_type = FTRACE_BUG_UPDATE; 2128 return FTRACE_UPDATE_MODIFY_CALL; 2129 } 2130 2131 if (update) { 2132 /* If there's no more users, clear all flags */ 2133 if (!ftrace_rec_count(rec)) 2134 rec->flags = 0; 2135 else 2136 /* 2137 * Just disable the record, but keep the ops TRAMP 2138 * and REGS states. The _EN flags must be disabled though. 2139 */ 2140 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | 2141 FTRACE_FL_REGS_EN); 2142 } 2143 2144 ftrace_bug_type = FTRACE_BUG_NOP; 2145 return FTRACE_UPDATE_MAKE_NOP; 2146 } 2147 2148 /** 2149 * ftrace_update_record, set a record that now is tracing or not 2150 * @rec: the record to update 2151 * @enable: set to true if the record is tracing, false to force disable 2152 * 2153 * The records that represent all functions that can be traced need 2154 * to be updated when tracing has been enabled. 
2155 */ 2156 int ftrace_update_record(struct dyn_ftrace *rec, bool enable) 2157 { 2158 return ftrace_check_record(rec, enable, true); 2159 } 2160 2161 /** 2162 * ftrace_test_record, check if the record has been enabled or not 2163 * @rec: the record to test 2164 * @enable: set to true to check if enabled, false if it is disabled 2165 * 2166 * The arch code may need to test if a record is already set to 2167 * tracing to determine how to modify the function code that it 2168 * represents. 2169 */ 2170 int ftrace_test_record(struct dyn_ftrace *rec, bool enable) 2171 { 2172 return ftrace_check_record(rec, enable, false); 2173 } 2174 2175 static struct ftrace_ops * 2176 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec) 2177 { 2178 struct ftrace_ops *op; 2179 unsigned long ip = rec->ip; 2180 2181 do_for_each_ftrace_op(op, ftrace_ops_list) { 2182 2183 if (!op->trampoline) 2184 continue; 2185 2186 if (hash_contains_ip(ip, op->func_hash)) 2187 return op; 2188 } while_for_each_ftrace_op(op); 2189 2190 return NULL; 2191 } 2192 2193 static struct ftrace_ops * 2194 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, 2195 struct ftrace_ops *op) 2196 { 2197 unsigned long ip = rec->ip; 2198 2199 while_for_each_ftrace_op(op) { 2200 2201 if (!op->trampoline) 2202 continue; 2203 2204 if (hash_contains_ip(ip, op->func_hash)) 2205 return op; 2206 } 2207 2208 return NULL; 2209 } 2210 2211 static struct ftrace_ops * 2212 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) 2213 { 2214 struct ftrace_ops *op; 2215 unsigned long ip = rec->ip; 2216 2217 /* 2218 * Need to check removed ops first. 2219 * If they are being removed, and this rec has a tramp, 2220 * and this rec is in the ops list, then it would be the 2221 * one with the tramp. 2222 */ 2223 if (removed_ops) { 2224 if (hash_contains_ip(ip, &removed_ops->old_hash)) 2225 return removed_ops; 2226 } 2227 2228 /* 2229 * Need to find the current trampoline for a rec. 2230 * Now, a trampoline is only attached to a rec if there 2231 * was a single 'ops' attached to it. But this can be called 2232 * when we are adding another op to the rec or removing the 2233 * current one. Thus, if the op is being added, we can 2234 * ignore it because it hasn't attached itself to the rec 2235 * yet. 2236 * 2237 * If an ops is being modified (hooking to different functions) 2238 * then we don't care about the new functions that are being 2239 * added, just the old ones (that are probably being removed). 2240 * 2241 * If we are adding an ops to a function that already is using 2242 * a trampoline, it needs to be removed (trampolines are only 2243 * for single ops connected), then an ops that is not being 2244 * modified also needs to be checked. 2245 */ 2246 do_for_each_ftrace_op(op, ftrace_ops_list) { 2247 2248 if (!op->trampoline) 2249 continue; 2250 2251 /* 2252 * If the ops is being added, it hasn't gotten to 2253 * the point to be removed from this tree yet. 2254 */ 2255 if (op->flags & FTRACE_OPS_FL_ADDING) 2256 continue; 2257 2258 2259 /* 2260 * If the ops is being modified and is in the old 2261 * hash, then it is probably being removed from this 2262 * function. 2263 */ 2264 if ((op->flags & FTRACE_OPS_FL_MODIFYING) && 2265 hash_contains_ip(ip, &op->old_hash)) 2266 return op; 2267 /* 2268 * If the ops is not being added or modified, and it's 2269 * in its normal filter hash, then this must be the one 2270 * we want! 
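 *
 * In short: an ops still marked ADDING is skipped (it has not yet
 * claimed this record), an ops marked MODIFYING is matched against its
 * old_hash (the functions it is moving away from), and any other ops
 * is matched against its live filter hash.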
 */
		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, op->func_hash))
			return op;

	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* pass rec in as regs to have non-NULL val */
		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

/**
 * ftrace_get_addr_new - Get the call address to set to
 * @rec: The ftrace record descriptor
 *
 * If the record has the FTRACE_FL_REGS flag set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not set, then it wants to convert to the normal callback.
 *
 * Returns the address of the trampoline to set to
 */
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP) {
		ops = ftrace_find_tramp_ops_new(rec);
		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
				(void *)rec->ip, (void *)rec->ip, rec->flags);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

/**
 * ftrace_get_addr_curr - Get the call address that is already there
 * @rec: The ftrace record descriptor
 *
 * The FTRACE_FL_REGS_EN is set when the record already points to
 * a function that saves all the regs. Basically the '_EN' version
 * represents the current state of the function.
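 * In other words: if FTRACE_FL_TRAMP_EN is set the site currently calls
 * the trampoline of the ops that owns it, if FTRACE_FL_REGS_EN is set it
 * calls FTRACE_REGS_ADDR, and otherwise it calls FTRACE_ADDR.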
2335 * 2336 * Returns the address of the trampoline that is currently being called 2337 */ 2338 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) 2339 { 2340 struct ftrace_ops *ops; 2341 2342 /* Trampolines take precedence over regs */ 2343 if (rec->flags & FTRACE_FL_TRAMP_EN) { 2344 ops = ftrace_find_tramp_ops_curr(rec); 2345 if (FTRACE_WARN_ON(!ops)) { 2346 pr_warn("Bad trampoline accounting at: %p (%pS)\n", 2347 (void *)rec->ip, (void *)rec->ip); 2348 /* Ftrace is shutting down, return anything */ 2349 return (unsigned long)FTRACE_ADDR; 2350 } 2351 return ops->trampoline; 2352 } 2353 2354 if (rec->flags & FTRACE_FL_REGS_EN) 2355 return (unsigned long)FTRACE_REGS_ADDR; 2356 else 2357 return (unsigned long)FTRACE_ADDR; 2358 } 2359 2360 static int 2361 __ftrace_replace_code(struct dyn_ftrace *rec, bool enable) 2362 { 2363 unsigned long ftrace_old_addr; 2364 unsigned long ftrace_addr; 2365 int ret; 2366 2367 ftrace_addr = ftrace_get_addr_new(rec); 2368 2369 /* This needs to be done before we call ftrace_update_record */ 2370 ftrace_old_addr = ftrace_get_addr_curr(rec); 2371 2372 ret = ftrace_update_record(rec, enable); 2373 2374 ftrace_bug_type = FTRACE_BUG_UNKNOWN; 2375 2376 switch (ret) { 2377 case FTRACE_UPDATE_IGNORE: 2378 return 0; 2379 2380 case FTRACE_UPDATE_MAKE_CALL: 2381 ftrace_bug_type = FTRACE_BUG_CALL; 2382 return ftrace_make_call(rec, ftrace_addr); 2383 2384 case FTRACE_UPDATE_MAKE_NOP: 2385 ftrace_bug_type = FTRACE_BUG_NOP; 2386 return ftrace_make_nop(NULL, rec, ftrace_old_addr); 2387 2388 case FTRACE_UPDATE_MODIFY_CALL: 2389 ftrace_bug_type = FTRACE_BUG_UPDATE; 2390 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); 2391 } 2392 2393 return -1; /* unknown ftrace bug */ 2394 } 2395 2396 void __weak ftrace_replace_code(int mod_flags) 2397 { 2398 struct dyn_ftrace *rec; 2399 struct ftrace_page *pg; 2400 bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL; 2401 int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL; 2402 int failed; 2403 2404 if (unlikely(ftrace_disabled)) 2405 return; 2406 2407 do_for_each_ftrace_rec(pg, rec) { 2408 2409 if (rec->flags & FTRACE_FL_DISABLED) 2410 continue; 2411 2412 failed = __ftrace_replace_code(rec, enable); 2413 if (failed) { 2414 ftrace_bug(failed, rec); 2415 /* Stop processing */ 2416 return; 2417 } 2418 if (schedulable) 2419 cond_resched(); 2420 } while_for_each_ftrace_rec(); 2421 } 2422 2423 struct ftrace_rec_iter { 2424 struct ftrace_page *pg; 2425 int index; 2426 }; 2427 2428 /** 2429 * ftrace_rec_iter_start, start up iterating over traced functions 2430 * 2431 * Returns an iterator handle that is used to iterate over all 2432 * the records that represent address locations where functions 2433 * are traced. 2434 * 2435 * May return NULL if no records are available. 2436 */ 2437 struct ftrace_rec_iter *ftrace_rec_iter_start(void) 2438 { 2439 /* 2440 * We only use a single iterator. 2441 * Protected by the ftrace_lock mutex. 2442 */ 2443 static struct ftrace_rec_iter ftrace_rec_iter; 2444 struct ftrace_rec_iter *iter = &ftrace_rec_iter; 2445 2446 iter->pg = ftrace_pages_start; 2447 iter->index = 0; 2448 2449 /* Could have empty pages */ 2450 while (iter->pg && !iter->pg->index) 2451 iter->pg = iter->pg->next; 2452 2453 if (!iter->pg) 2454 return NULL; 2455 2456 return iter; 2457 } 2458 2459 /** 2460 * ftrace_rec_iter_next, get the next record to process. 2461 * @iter: The handle to the iterator. 2462 * 2463 * Returns the next iterator after the given iterator @iter. 
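 *
 * A minimal usage sketch (this is how arch code typically walks every
 * record, one page at a time):
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter))
 *		rec = ftrace_rec_iter_record(iter);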
2464 */ 2465 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter) 2466 { 2467 iter->index++; 2468 2469 if (iter->index >= iter->pg->index) { 2470 iter->pg = iter->pg->next; 2471 iter->index = 0; 2472 2473 /* Could have empty pages */ 2474 while (iter->pg && !iter->pg->index) 2475 iter->pg = iter->pg->next; 2476 } 2477 2478 if (!iter->pg) 2479 return NULL; 2480 2481 return iter; 2482 } 2483 2484 /** 2485 * ftrace_rec_iter_record, get the record at the iterator location 2486 * @iter: The current iterator location 2487 * 2488 * Returns the record that the current @iter is at. 2489 */ 2490 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter) 2491 { 2492 return &iter->pg->records[iter->index]; 2493 } 2494 2495 static int 2496 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) 2497 { 2498 int ret; 2499 2500 if (unlikely(ftrace_disabled)) 2501 return 0; 2502 2503 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); 2504 if (ret) { 2505 ftrace_bug_type = FTRACE_BUG_INIT; 2506 ftrace_bug(ret, rec); 2507 return 0; 2508 } 2509 return 1; 2510 } 2511 2512 /* 2513 * archs can override this function if they must do something 2514 * before the modifying code is performed. 2515 */ 2516 int __weak ftrace_arch_code_modify_prepare(void) 2517 { 2518 return 0; 2519 } 2520 2521 /* 2522 * archs can override this function if they must do something 2523 * after the modifying code is performed. 2524 */ 2525 int __weak ftrace_arch_code_modify_post_process(void) 2526 { 2527 return 0; 2528 } 2529 2530 void ftrace_modify_all_code(int command) 2531 { 2532 int update = command & FTRACE_UPDATE_TRACE_FUNC; 2533 int mod_flags = 0; 2534 int err = 0; 2535 2536 if (command & FTRACE_MAY_SLEEP) 2537 mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL; 2538 2539 /* 2540 * If the ftrace_caller calls a ftrace_ops func directly, 2541 * we need to make sure that it only traces functions it 2542 * expects to trace. When doing the switch of functions, 2543 * we need to update to the ftrace_ops_list_func first 2544 * before the transition between old and new calls are set, 2545 * as the ftrace_ops_list_func will check the ops hashes 2546 * to make sure the ops are having the right functions 2547 * traced. 
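 *
 * So the order below is: first point the arch trampoline at
 * ftrace_ops_list_func, then update the individual call sites, and only
 * then switch to the final ftrace_trace_function once the new
 * function_trace_op is visible to all CPUs.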
 */
	if (update) {
		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(mod_flags);

	if (update && ftrace_trace_function != ftrace_ops_list_func) {
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
		err = ftrace_update_ftrace_func(ftrace_trace_function);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_START_FUNC_RET)
		err = ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		err = ftrace_disable_ftrace_graph_caller();
	FTRACE_WARN_ON(err);
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}

/**
 * ftrace_run_stop_machine, go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, then
 * it can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

/**
 * arch_ftrace_update_code, modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if they do not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do whatever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
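	 * Some architectures override arch_ftrace_update_code() to avoid
	 * stop_machine() entirely (x86, for instance, patches the call
	 * sites with a breakpoint based scheme).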
2625 */ 2626 arch_ftrace_update_code(command); 2627 2628 ret = ftrace_arch_code_modify_post_process(); 2629 FTRACE_WARN_ON(ret); 2630 } 2631 2632 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, 2633 struct ftrace_ops_hash *old_hash) 2634 { 2635 ops->flags |= FTRACE_OPS_FL_MODIFYING; 2636 ops->old_hash.filter_hash = old_hash->filter_hash; 2637 ops->old_hash.notrace_hash = old_hash->notrace_hash; 2638 ftrace_run_update_code(command); 2639 ops->old_hash.filter_hash = NULL; 2640 ops->old_hash.notrace_hash = NULL; 2641 ops->flags &= ~FTRACE_OPS_FL_MODIFYING; 2642 } 2643 2644 static ftrace_func_t saved_ftrace_func; 2645 static int ftrace_start_up; 2646 2647 void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops) 2648 { 2649 } 2650 2651 static void ftrace_startup_enable(int command) 2652 { 2653 if (saved_ftrace_func != ftrace_trace_function) { 2654 saved_ftrace_func = ftrace_trace_function; 2655 command |= FTRACE_UPDATE_TRACE_FUNC; 2656 } 2657 2658 if (!command || !ftrace_enabled) 2659 return; 2660 2661 ftrace_run_update_code(command); 2662 } 2663 2664 static void ftrace_startup_all(int command) 2665 { 2666 update_all_ops = true; 2667 ftrace_startup_enable(command); 2668 update_all_ops = false; 2669 } 2670 2671 int ftrace_startup(struct ftrace_ops *ops, int command) 2672 { 2673 int ret; 2674 2675 if (unlikely(ftrace_disabled)) 2676 return -ENODEV; 2677 2678 ret = __register_ftrace_function(ops); 2679 if (ret) 2680 return ret; 2681 2682 ftrace_start_up++; 2683 2684 /* 2685 * Note that ftrace probes uses this to start up 2686 * and modify functions it will probe. But we still 2687 * set the ADDING flag for modification, as probes 2688 * do not have trampolines. If they add them in the 2689 * future, then the probes will need to distinguish 2690 * between adding and updating probes. 2691 */ 2692 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING; 2693 2694 ret = ftrace_hash_ipmodify_enable(ops); 2695 if (ret < 0) { 2696 /* Rollback registration process */ 2697 __unregister_ftrace_function(ops); 2698 ftrace_start_up--; 2699 ops->flags &= ~FTRACE_OPS_FL_ENABLED; 2700 return ret; 2701 } 2702 2703 if (ftrace_hash_rec_enable(ops, 1)) 2704 command |= FTRACE_UPDATE_CALLS; 2705 2706 ftrace_startup_enable(command); 2707 2708 ops->flags &= ~FTRACE_OPS_FL_ADDING; 2709 2710 return 0; 2711 } 2712 2713 int ftrace_shutdown(struct ftrace_ops *ops, int command) 2714 { 2715 int ret; 2716 2717 if (unlikely(ftrace_disabled)) 2718 return -ENODEV; 2719 2720 ret = __unregister_ftrace_function(ops); 2721 if (ret) 2722 return ret; 2723 2724 ftrace_start_up--; 2725 /* 2726 * Just warn in case of unbalance, no need to kill ftrace, it's not 2727 * critical but the ftrace_call callers may be never nopped again after 2728 * further ftrace uses. 2729 */ 2730 WARN_ON_ONCE(ftrace_start_up < 0); 2731 2732 /* Disabling ipmodify never fails */ 2733 ftrace_hash_ipmodify_disable(ops); 2734 2735 if (ftrace_hash_rec_disable(ops, 1)) 2736 command |= FTRACE_UPDATE_CALLS; 2737 2738 ops->flags &= ~FTRACE_OPS_FL_ENABLED; 2739 2740 if (saved_ftrace_func != ftrace_trace_function) { 2741 saved_ftrace_func = ftrace_trace_function; 2742 command |= FTRACE_UPDATE_TRACE_FUNC; 2743 } 2744 2745 if (!command || !ftrace_enabled) { 2746 /* 2747 * If these are dynamic or per_cpu ops, they still 2748 * need their data freed. Since, function tracing is 2749 * not currently active, we can just free them 2750 * without synchronizing all CPUs. 
 */
		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
			goto free_ops;

		return 0;
	}

	/*
	 * If the ops uses a trampoline, then it needs to be
	 * tested first on update.
	 */
	ops->flags |= FTRACE_OPS_FL_REMOVING;
	removed_ops = ops;

	/* The trampoline logic checks the old hashes */
	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;

	ftrace_run_update_code(command);

	/*
	 * If there are no more ops registered with ftrace, run a
	 * sanity check to make sure all rec flags are cleared.
	 */
	if (rcu_dereference_protected(ftrace_ops_list,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		struct ftrace_page *pg;
		struct dyn_ftrace *rec;

		do_for_each_ftrace_rec(pg, rec) {
			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
				pr_warn(" %pS flags:%lx\n",
					(void *)rec->ip, rec->flags);
		} while_for_each_ftrace_rec();
	}

	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;

	removed_ops = NULL;
	ops->flags &= ~FTRACE_OPS_FL_REMOVING;

	/*
	 * Dynamic ops may be freed; we must make sure that all
	 * callers are done before leaving this function.
	 * The same goes for freeing the per_cpu data of the per_cpu
	 * ops.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
		/*
		 * We need to do a hard force of sched synchronization.
		 * This is because we use preempt_disable() to do RCU, but
		 * the function tracers can be called where RCU is not watching
		 * (like before user_exit()). We cannot rely on the RCU
		 * infrastructure to do the synchronization, thus we must do it
		 * ourselves.
		 */
		schedule_on_each_cpu(ftrace_sync);

		/*
		 * When the kernel is preemptible, tasks can be preempted
		 * while on a ftrace trampoline. Just scheduling a task on
		 * a CPU is not good enough to flush them. Calling
		 * synchronize_rcu_tasks() will wait for those tasks to
		 * execute and either schedule voluntarily or enter user space.
		 */
		if (IS_ENABLED(CONFIG_PREEMPTION))
			synchronize_rcu_tasks();

 free_ops:
		arch_ftrace_trampoline_free(ops);
	}

	return 0;
}

static void ftrace_startup_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up) {
		command = FTRACE_UPDATE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_START_FUNC_RET;
		ftrace_startup_enable(command);
	}
}

static void ftrace_shutdown_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up) {
		command = FTRACE_DISABLE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_STOP_FUNC_RET;
		ftrace_run_update_code(command);
	}
}

static u64 ftrace_update_time;
unsigned long ftrace_update_tot_cnt;

static inline int ops_traces_mod(struct ftrace_ops *ops)
{
	/*
	 * Filter_hash being empty will default to trace module.
	 * But notrace hash requires a test of individual module functions.
2869 */ 2870 return ftrace_hash_empty(ops->func_hash->filter_hash) && 2871 ftrace_hash_empty(ops->func_hash->notrace_hash); 2872 } 2873 2874 /* 2875 * Check if the current ops references the record. 2876 * 2877 * If the ops traces all functions, then it was already accounted for. 2878 * If the ops does not trace the current record function, skip it. 2879 * If the ops ignores the function via notrace filter, skip it. 2880 */ 2881 static inline bool 2882 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) 2883 { 2884 /* If ops isn't enabled, ignore it */ 2885 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) 2886 return false; 2887 2888 /* If ops traces all then it includes this function */ 2889 if (ops_traces_mod(ops)) 2890 return true; 2891 2892 /* The function must be in the filter */ 2893 if (!ftrace_hash_empty(ops->func_hash->filter_hash) && 2894 !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) 2895 return false; 2896 2897 /* If in notrace hash, we ignore it too */ 2898 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) 2899 return false; 2900 2901 return true; 2902 } 2903 2904 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) 2905 { 2906 struct ftrace_page *pg; 2907 struct dyn_ftrace *p; 2908 u64 start, stop; 2909 unsigned long update_cnt = 0; 2910 unsigned long rec_flags = 0; 2911 int i; 2912 2913 start = ftrace_now(raw_smp_processor_id()); 2914 2915 /* 2916 * When a module is loaded, this function is called to convert 2917 * the calls to mcount in its text to nops, and also to create 2918 * an entry in the ftrace data. Now, if ftrace is activated 2919 * after this call, but before the module sets its text to 2920 * read-only, the modification of enabling ftrace can fail if 2921 * the read-only is done while ftrace is converting the calls. 2922 * To prevent this, the module's records are set as disabled 2923 * and will be enabled after the call to set the module's text 2924 * to read-only. 2925 */ 2926 if (mod) 2927 rec_flags |= FTRACE_FL_DISABLED; 2928 2929 for (pg = new_pgs; pg; pg = pg->next) { 2930 2931 for (i = 0; i < pg->index; i++) { 2932 2933 /* If something went wrong, bail without enabling anything */ 2934 if (unlikely(ftrace_disabled)) 2935 return -1; 2936 2937 p = &pg->records[i]; 2938 p->flags = rec_flags; 2939 2940 /* 2941 * Do the initial record conversion from mcount jump 2942 * to the NOP instructions. 2943 */ 2944 if (!__is_defined(CC_USING_NOP_MCOUNT) && 2945 !ftrace_code_disable(mod, p)) 2946 break; 2947 2948 update_cnt++; 2949 } 2950 } 2951 2952 stop = ftrace_now(raw_smp_processor_id()); 2953 ftrace_update_time = stop - start; 2954 ftrace_update_tot_cnt += update_cnt; 2955 2956 return 0; 2957 } 2958 2959 static int ftrace_allocate_records(struct ftrace_page *pg, int count) 2960 { 2961 int order; 2962 int cnt; 2963 2964 if (WARN_ON(!count)) 2965 return -EINVAL; 2966 2967 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); 2968 2969 /* 2970 * We want to fill as much as possible. No more than a page 2971 * may be empty. 
2972 */ 2973 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE) 2974 order--; 2975 2976 again: 2977 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); 2978 2979 if (!pg->records) { 2980 /* if we can't allocate this size, try something smaller */ 2981 if (!order) 2982 return -ENOMEM; 2983 order >>= 1; 2984 goto again; 2985 } 2986 2987 cnt = (PAGE_SIZE << order) / ENTRY_SIZE; 2988 pg->size = cnt; 2989 2990 if (cnt > count) 2991 cnt = count; 2992 2993 return cnt; 2994 } 2995 2996 static struct ftrace_page * 2997 ftrace_allocate_pages(unsigned long num_to_init) 2998 { 2999 struct ftrace_page *start_pg; 3000 struct ftrace_page *pg; 3001 int order; 3002 int cnt; 3003 3004 if (!num_to_init) 3005 return NULL; 3006 3007 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); 3008 if (!pg) 3009 return NULL; 3010 3011 /* 3012 * Try to allocate as much as possible in one continues 3013 * location that fills in all of the space. We want to 3014 * waste as little space as possible. 3015 */ 3016 for (;;) { 3017 cnt = ftrace_allocate_records(pg, num_to_init); 3018 if (cnt < 0) 3019 goto free_pages; 3020 3021 num_to_init -= cnt; 3022 if (!num_to_init) 3023 break; 3024 3025 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); 3026 if (!pg->next) 3027 goto free_pages; 3028 3029 pg = pg->next; 3030 } 3031 3032 return start_pg; 3033 3034 free_pages: 3035 pg = start_pg; 3036 while (pg) { 3037 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 3038 free_pages((unsigned long)pg->records, order); 3039 start_pg = pg->next; 3040 kfree(pg); 3041 pg = start_pg; 3042 } 3043 pr_info("ftrace: FAILED to allocate memory for functions\n"); 3044 return NULL; 3045 } 3046 3047 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ 3048 3049 struct ftrace_iterator { 3050 loff_t pos; 3051 loff_t func_pos; 3052 loff_t mod_pos; 3053 struct ftrace_page *pg; 3054 struct dyn_ftrace *func; 3055 struct ftrace_func_probe *probe; 3056 struct ftrace_func_entry *probe_entry; 3057 struct trace_parser parser; 3058 struct ftrace_hash *hash; 3059 struct ftrace_ops *ops; 3060 struct trace_array *tr; 3061 struct list_head *mod_list; 3062 int pidx; 3063 int idx; 3064 unsigned flags; 3065 }; 3066 3067 static void * 3068 t_probe_next(struct seq_file *m, loff_t *pos) 3069 { 3070 struct ftrace_iterator *iter = m->private; 3071 struct trace_array *tr = iter->ops->private; 3072 struct list_head *func_probes; 3073 struct ftrace_hash *hash; 3074 struct list_head *next; 3075 struct hlist_node *hnd = NULL; 3076 struct hlist_head *hhd; 3077 int size; 3078 3079 (*pos)++; 3080 iter->pos = *pos; 3081 3082 if (!tr) 3083 return NULL; 3084 3085 func_probes = &tr->func_probes; 3086 if (list_empty(func_probes)) 3087 return NULL; 3088 3089 if (!iter->probe) { 3090 next = func_probes->next; 3091 iter->probe = list_entry(next, struct ftrace_func_probe, list); 3092 } 3093 3094 if (iter->probe_entry) 3095 hnd = &iter->probe_entry->hlist; 3096 3097 hash = iter->probe->ops.func_hash->filter_hash; 3098 3099 /* 3100 * A probe being registered may temporarily have an empty hash 3101 * and it's at the end of the func_probes list. 
3102 */ 3103 if (!hash || hash == EMPTY_HASH) 3104 return NULL; 3105 3106 size = 1 << hash->size_bits; 3107 3108 retry: 3109 if (iter->pidx >= size) { 3110 if (iter->probe->list.next == func_probes) 3111 return NULL; 3112 next = iter->probe->list.next; 3113 iter->probe = list_entry(next, struct ftrace_func_probe, list); 3114 hash = iter->probe->ops.func_hash->filter_hash; 3115 size = 1 << hash->size_bits; 3116 iter->pidx = 0; 3117 } 3118 3119 hhd = &hash->buckets[iter->pidx]; 3120 3121 if (hlist_empty(hhd)) { 3122 iter->pidx++; 3123 hnd = NULL; 3124 goto retry; 3125 } 3126 3127 if (!hnd) 3128 hnd = hhd->first; 3129 else { 3130 hnd = hnd->next; 3131 if (!hnd) { 3132 iter->pidx++; 3133 goto retry; 3134 } 3135 } 3136 3137 if (WARN_ON_ONCE(!hnd)) 3138 return NULL; 3139 3140 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist); 3141 3142 return iter; 3143 } 3144 3145 static void *t_probe_start(struct seq_file *m, loff_t *pos) 3146 { 3147 struct ftrace_iterator *iter = m->private; 3148 void *p = NULL; 3149 loff_t l; 3150 3151 if (!(iter->flags & FTRACE_ITER_DO_PROBES)) 3152 return NULL; 3153 3154 if (iter->mod_pos > *pos) 3155 return NULL; 3156 3157 iter->probe = NULL; 3158 iter->probe_entry = NULL; 3159 iter->pidx = 0; 3160 for (l = 0; l <= (*pos - iter->mod_pos); ) { 3161 p = t_probe_next(m, &l); 3162 if (!p) 3163 break; 3164 } 3165 if (!p) 3166 return NULL; 3167 3168 /* Only set this if we have an item */ 3169 iter->flags |= FTRACE_ITER_PROBE; 3170 3171 return iter; 3172 } 3173 3174 static int 3175 t_probe_show(struct seq_file *m, struct ftrace_iterator *iter) 3176 { 3177 struct ftrace_func_entry *probe_entry; 3178 struct ftrace_probe_ops *probe_ops; 3179 struct ftrace_func_probe *probe; 3180 3181 probe = iter->probe; 3182 probe_entry = iter->probe_entry; 3183 3184 if (WARN_ON_ONCE(!probe || !probe_entry)) 3185 return -EIO; 3186 3187 probe_ops = probe->probe_ops; 3188 3189 if (probe_ops->print) 3190 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data); 3191 3192 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, 3193 (void *)probe_ops->func); 3194 3195 return 0; 3196 } 3197 3198 static void * 3199 t_mod_next(struct seq_file *m, loff_t *pos) 3200 { 3201 struct ftrace_iterator *iter = m->private; 3202 struct trace_array *tr = iter->tr; 3203 3204 (*pos)++; 3205 iter->pos = *pos; 3206 3207 iter->mod_list = iter->mod_list->next; 3208 3209 if (iter->mod_list == &tr->mod_trace || 3210 iter->mod_list == &tr->mod_notrace) { 3211 iter->flags &= ~FTRACE_ITER_MOD; 3212 return NULL; 3213 } 3214 3215 iter->mod_pos = *pos; 3216 3217 return iter; 3218 } 3219 3220 static void *t_mod_start(struct seq_file *m, loff_t *pos) 3221 { 3222 struct ftrace_iterator *iter = m->private; 3223 void *p = NULL; 3224 loff_t l; 3225 3226 if (iter->func_pos > *pos) 3227 return NULL; 3228 3229 iter->mod_pos = iter->func_pos; 3230 3231 /* probes are only available if tr is set */ 3232 if (!iter->tr) 3233 return NULL; 3234 3235 for (l = 0; l <= (*pos - iter->func_pos); ) { 3236 p = t_mod_next(m, &l); 3237 if (!p) 3238 break; 3239 } 3240 if (!p) { 3241 iter->flags &= ~FTRACE_ITER_MOD; 3242 return t_probe_start(m, pos); 3243 } 3244 3245 /* Only set this if we have an item */ 3246 iter->flags |= FTRACE_ITER_MOD; 3247 3248 return iter; 3249 } 3250 3251 static int 3252 t_mod_show(struct seq_file *m, struct ftrace_iterator *iter) 3253 { 3254 struct ftrace_mod_load *ftrace_mod; 3255 struct trace_array *tr = iter->tr; 3256 3257 if (WARN_ON_ONCE(!iter->mod_list) || 3258 iter->mod_list == &tr->mod_trace || 
3259 iter->mod_list == &tr->mod_notrace) 3260 return -EIO; 3261 3262 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list); 3263 3264 if (ftrace_mod->func) 3265 seq_printf(m, "%s", ftrace_mod->func); 3266 else 3267 seq_putc(m, '*'); 3268 3269 seq_printf(m, ":mod:%s\n", ftrace_mod->module); 3270 3271 return 0; 3272 } 3273 3274 static void * 3275 t_func_next(struct seq_file *m, loff_t *pos) 3276 { 3277 struct ftrace_iterator *iter = m->private; 3278 struct dyn_ftrace *rec = NULL; 3279 3280 (*pos)++; 3281 3282 retry: 3283 if (iter->idx >= iter->pg->index) { 3284 if (iter->pg->next) { 3285 iter->pg = iter->pg->next; 3286 iter->idx = 0; 3287 goto retry; 3288 } 3289 } else { 3290 rec = &iter->pg->records[iter->idx++]; 3291 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && 3292 !ftrace_lookup_ip(iter->hash, rec->ip)) || 3293 3294 ((iter->flags & FTRACE_ITER_ENABLED) && 3295 !(rec->flags & FTRACE_FL_ENABLED))) { 3296 3297 rec = NULL; 3298 goto retry; 3299 } 3300 } 3301 3302 if (!rec) 3303 return NULL; 3304 3305 iter->pos = iter->func_pos = *pos; 3306 iter->func = rec; 3307 3308 return iter; 3309 } 3310 3311 static void * 3312 t_next(struct seq_file *m, void *v, loff_t *pos) 3313 { 3314 struct ftrace_iterator *iter = m->private; 3315 loff_t l = *pos; /* t_probe_start() must use original pos */ 3316 void *ret; 3317 3318 if (unlikely(ftrace_disabled)) 3319 return NULL; 3320 3321 if (iter->flags & FTRACE_ITER_PROBE) 3322 return t_probe_next(m, pos); 3323 3324 if (iter->flags & FTRACE_ITER_MOD) 3325 return t_mod_next(m, pos); 3326 3327 if (iter->flags & FTRACE_ITER_PRINTALL) { 3328 /* next must increment pos, and t_probe_start does not */ 3329 (*pos)++; 3330 return t_mod_start(m, &l); 3331 } 3332 3333 ret = t_func_next(m, pos); 3334 3335 if (!ret) 3336 return t_mod_start(m, &l); 3337 3338 return ret; 3339 } 3340 3341 static void reset_iter_read(struct ftrace_iterator *iter) 3342 { 3343 iter->pos = 0; 3344 iter->func_pos = 0; 3345 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD); 3346 } 3347 3348 static void *t_start(struct seq_file *m, loff_t *pos) 3349 { 3350 struct ftrace_iterator *iter = m->private; 3351 void *p = NULL; 3352 loff_t l; 3353 3354 mutex_lock(&ftrace_lock); 3355 3356 if (unlikely(ftrace_disabled)) 3357 return NULL; 3358 3359 /* 3360 * If an lseek was done, then reset and start from beginning. 3361 */ 3362 if (*pos < iter->pos) 3363 reset_iter_read(iter); 3364 3365 /* 3366 * For set_ftrace_filter reading, if we have the filter 3367 * off, we can short cut and just print out that all 3368 * functions are enabled. 3369 */ 3370 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && 3371 ftrace_hash_empty(iter->hash)) { 3372 iter->func_pos = 1; /* Account for the message */ 3373 if (*pos > 0) 3374 return t_mod_start(m, pos); 3375 iter->flags |= FTRACE_ITER_PRINTALL; 3376 /* reset in case of seek/pread */ 3377 iter->flags &= ~FTRACE_ITER_PROBE; 3378 return iter; 3379 } 3380 3381 if (iter->flags & FTRACE_ITER_MOD) 3382 return t_mod_start(m, pos); 3383 3384 /* 3385 * Unfortunately, we need to restart at ftrace_pages_start 3386 * every time we let go of the ftrace_mutex. This is because 3387 * those pointers can change without the lock. 
3388 */ 3389 iter->pg = ftrace_pages_start; 3390 iter->idx = 0; 3391 for (l = 0; l <= *pos; ) { 3392 p = t_func_next(m, &l); 3393 if (!p) 3394 break; 3395 } 3396 3397 if (!p) 3398 return t_mod_start(m, pos); 3399 3400 return iter; 3401 } 3402 3403 static void t_stop(struct seq_file *m, void *p) 3404 { 3405 mutex_unlock(&ftrace_lock); 3406 } 3407 3408 void * __weak 3409 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) 3410 { 3411 return NULL; 3412 } 3413 3414 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops, 3415 struct dyn_ftrace *rec) 3416 { 3417 void *ptr; 3418 3419 ptr = arch_ftrace_trampoline_func(ops, rec); 3420 if (ptr) 3421 seq_printf(m, " ->%pS", ptr); 3422 } 3423 3424 static int t_show(struct seq_file *m, void *v) 3425 { 3426 struct ftrace_iterator *iter = m->private; 3427 struct dyn_ftrace *rec; 3428 3429 if (iter->flags & FTRACE_ITER_PROBE) 3430 return t_probe_show(m, iter); 3431 3432 if (iter->flags & FTRACE_ITER_MOD) 3433 return t_mod_show(m, iter); 3434 3435 if (iter->flags & FTRACE_ITER_PRINTALL) { 3436 if (iter->flags & FTRACE_ITER_NOTRACE) 3437 seq_puts(m, "#### no functions disabled ####\n"); 3438 else 3439 seq_puts(m, "#### all functions enabled ####\n"); 3440 return 0; 3441 } 3442 3443 rec = iter->func; 3444 3445 if (!rec) 3446 return 0; 3447 3448 seq_printf(m, "%ps", (void *)rec->ip); 3449 if (iter->flags & FTRACE_ITER_ENABLED) { 3450 struct ftrace_ops *ops; 3451 3452 seq_printf(m, " (%ld)%s%s", 3453 ftrace_rec_count(rec), 3454 rec->flags & FTRACE_FL_REGS ? " R" : " ", 3455 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " "); 3456 if (rec->flags & FTRACE_FL_TRAMP_EN) { 3457 ops = ftrace_find_tramp_ops_any(rec); 3458 if (ops) { 3459 do { 3460 seq_printf(m, "\ttramp: %pS (%pS)", 3461 (void *)ops->trampoline, 3462 (void *)ops->func); 3463 add_trampoline_func(m, ops, rec); 3464 ops = ftrace_find_tramp_ops_next(rec, ops); 3465 } while (ops); 3466 } else 3467 seq_puts(m, "\ttramp: ERROR!"); 3468 } else { 3469 add_trampoline_func(m, NULL, rec); 3470 } 3471 } 3472 3473 seq_putc(m, '\n'); 3474 3475 return 0; 3476 } 3477 3478 static const struct seq_operations show_ftrace_seq_ops = { 3479 .start = t_start, 3480 .next = t_next, 3481 .stop = t_stop, 3482 .show = t_show, 3483 }; 3484 3485 static int 3486 ftrace_avail_open(struct inode *inode, struct file *file) 3487 { 3488 struct ftrace_iterator *iter; 3489 3490 if (unlikely(ftrace_disabled)) 3491 return -ENODEV; 3492 3493 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 3494 if (!iter) 3495 return -ENOMEM; 3496 3497 iter->pg = ftrace_pages_start; 3498 iter->ops = &global_ops; 3499 3500 return 0; 3501 } 3502 3503 static int 3504 ftrace_enabled_open(struct inode *inode, struct file *file) 3505 { 3506 struct ftrace_iterator *iter; 3507 3508 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 3509 if (!iter) 3510 return -ENOMEM; 3511 3512 iter->pg = ftrace_pages_start; 3513 iter->flags = FTRACE_ITER_ENABLED; 3514 iter->ops = &global_ops; 3515 3516 return 0; 3517 } 3518 3519 /** 3520 * ftrace_regex_open - initialize function tracer filter files 3521 * @ops: The ftrace_ops that hold the hash filters 3522 * @flag: The type of filter to process 3523 * @inode: The inode, usually passed in to your open routine 3524 * @file: The file, usually passed in to your open routine 3525 * 3526 * ftrace_regex_open() initializes the filter files for the 3527 * @ops. Depending on @flag it may process the filter hash or 3528 * the notrace hash of @ops. 
With this called from the open 3529 * routine, you can use ftrace_filter_write() for the write 3530 * routine if @flag has FTRACE_ITER_FILTER set, or 3531 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. 3532 * tracing_lseek() should be used as the lseek routine, and 3533 * release must call ftrace_regex_release(). 3534 */ 3535 int 3536 ftrace_regex_open(struct ftrace_ops *ops, int flag, 3537 struct inode *inode, struct file *file) 3538 { 3539 struct ftrace_iterator *iter; 3540 struct ftrace_hash *hash; 3541 struct list_head *mod_head; 3542 struct trace_array *tr = ops->private; 3543 int ret = 0; 3544 3545 ftrace_ops_init(ops); 3546 3547 if (unlikely(ftrace_disabled)) 3548 return -ENODEV; 3549 3550 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 3551 if (!iter) 3552 return -ENOMEM; 3553 3554 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) { 3555 kfree(iter); 3556 return -ENOMEM; 3557 } 3558 3559 iter->ops = ops; 3560 iter->flags = flag; 3561 iter->tr = tr; 3562 3563 mutex_lock(&ops->func_hash->regex_lock); 3564 3565 if (flag & FTRACE_ITER_NOTRACE) { 3566 hash = ops->func_hash->notrace_hash; 3567 mod_head = tr ? &tr->mod_notrace : NULL; 3568 } else { 3569 hash = ops->func_hash->filter_hash; 3570 mod_head = tr ? &tr->mod_trace : NULL; 3571 } 3572 3573 iter->mod_list = mod_head; 3574 3575 if (file->f_mode & FMODE_WRITE) { 3576 const int size_bits = FTRACE_HASH_DEFAULT_BITS; 3577 3578 if (file->f_flags & O_TRUNC) { 3579 iter->hash = alloc_ftrace_hash(size_bits); 3580 clear_ftrace_mod_list(mod_head); 3581 } else { 3582 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); 3583 } 3584 3585 if (!iter->hash) { 3586 trace_parser_put(&iter->parser); 3587 kfree(iter); 3588 ret = -ENOMEM; 3589 goto out_unlock; 3590 } 3591 } else 3592 iter->hash = hash; 3593 3594 if (file->f_mode & FMODE_READ) { 3595 iter->pg = ftrace_pages_start; 3596 3597 ret = seq_open(file, &show_ftrace_seq_ops); 3598 if (!ret) { 3599 struct seq_file *m = file->private_data; 3600 m->private = iter; 3601 } else { 3602 /* Failed */ 3603 free_ftrace_hash(iter->hash); 3604 trace_parser_put(&iter->parser); 3605 kfree(iter); 3606 } 3607 } else 3608 file->private_data = iter; 3609 3610 out_unlock: 3611 mutex_unlock(&ops->func_hash->regex_lock); 3612 3613 return ret; 3614 } 3615 3616 static int 3617 ftrace_filter_open(struct inode *inode, struct file *file) 3618 { 3619 struct ftrace_ops *ops = inode->i_private; 3620 3621 return ftrace_regex_open(ops, 3622 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES, 3623 inode, file); 3624 } 3625 3626 static int 3627 ftrace_notrace_open(struct inode *inode, struct file *file) 3628 { 3629 struct ftrace_ops *ops = inode->i_private; 3630 3631 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, 3632 inode, file); 3633 } 3634 3635 /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */ 3636 struct ftrace_glob { 3637 char *search; 3638 unsigned len; 3639 int type; 3640 }; 3641 3642 /* 3643 * If symbols in an architecture don't correspond exactly to the user-visible 3644 * name of what they represent, it is possible to define this function to 3645 * perform the necessary adjustments. 
3646 */ 3647 char * __weak arch_ftrace_match_adjust(char *str, const char *search) 3648 { 3649 return str; 3650 } 3651 3652 static int ftrace_match(char *str, struct ftrace_glob *g) 3653 { 3654 int matched = 0; 3655 int slen; 3656 3657 str = arch_ftrace_match_adjust(str, g->search); 3658 3659 switch (g->type) { 3660 case MATCH_FULL: 3661 if (strcmp(str, g->search) == 0) 3662 matched = 1; 3663 break; 3664 case MATCH_FRONT_ONLY: 3665 if (strncmp(str, g->search, g->len) == 0) 3666 matched = 1; 3667 break; 3668 case MATCH_MIDDLE_ONLY: 3669 if (strstr(str, g->search)) 3670 matched = 1; 3671 break; 3672 case MATCH_END_ONLY: 3673 slen = strlen(str); 3674 if (slen >= g->len && 3675 memcmp(str + slen - g->len, g->search, g->len) == 0) 3676 matched = 1; 3677 break; 3678 case MATCH_GLOB: 3679 if (glob_match(g->search, str)) 3680 matched = 1; 3681 break; 3682 } 3683 3684 return matched; 3685 } 3686 3687 static int 3688 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter) 3689 { 3690 struct ftrace_func_entry *entry; 3691 int ret = 0; 3692 3693 entry = ftrace_lookup_ip(hash, rec->ip); 3694 if (clear_filter) { 3695 /* Do nothing if it doesn't exist */ 3696 if (!entry) 3697 return 0; 3698 3699 free_hash_entry(hash, entry); 3700 } else { 3701 /* Do nothing if it exists */ 3702 if (entry) 3703 return 0; 3704 3705 ret = add_hash_entry(hash, rec->ip); 3706 } 3707 return ret; 3708 } 3709 3710 static int 3711 add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g, 3712 int clear_filter) 3713 { 3714 long index = simple_strtoul(func_g->search, NULL, 0); 3715 struct ftrace_page *pg; 3716 struct dyn_ftrace *rec; 3717 3718 /* The index starts at 1 */ 3719 if (--index < 0) 3720 return 0; 3721 3722 do_for_each_ftrace_rec(pg, rec) { 3723 if (pg->index <= index) { 3724 index -= pg->index; 3725 /* this is a double loop, break goes to the next page */ 3726 break; 3727 } 3728 rec = &pg->records[index]; 3729 enter_record(hash, rec, clear_filter); 3730 return 1; 3731 } while_for_each_ftrace_rec(); 3732 return 0; 3733 } 3734 3735 static int 3736 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g, 3737 struct ftrace_glob *mod_g, int exclude_mod) 3738 { 3739 char str[KSYM_SYMBOL_LEN]; 3740 char *modname; 3741 3742 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); 3743 3744 if (mod_g) { 3745 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0; 3746 3747 /* blank module name to match all modules */ 3748 if (!mod_g->len) { 3749 /* blank module globbing: modname xor exclude_mod */ 3750 if (!exclude_mod != !modname) 3751 goto func_match; 3752 return 0; 3753 } 3754 3755 /* 3756 * exclude_mod is set to trace everything but the given 3757 * module. If it is set and the module matches, then 3758 * return 0. If it is not set, and the module doesn't match 3759 * also return 0. Otherwise, check the function to see if 3760 * that matches. 3761 */ 3762 if (!mod_matches == !exclude_mod) 3763 return 0; 3764 func_match: 3765 /* blank search means to match all funcs in the mod */ 3766 if (!func_g->len) 3767 return 1; 3768 } 3769 3770 return ftrace_match(str, func_g); 3771 } 3772 3773 static int 3774 match_records(struct ftrace_hash *hash, char *func, int len, char *mod) 3775 { 3776 struct ftrace_page *pg; 3777 struct dyn_ftrace *rec; 3778 struct ftrace_glob func_g = { .type = MATCH_FULL }; 3779 struct ftrace_glob mod_g = { .type = MATCH_FULL }; 3780 struct ftrace_glob *mod_match = (mod) ? 
&mod_g : NULL; 3781 int exclude_mod = 0; 3782 int found = 0; 3783 int ret; 3784 int clear_filter = 0; 3785 3786 if (func) { 3787 func_g.type = filter_parse_regex(func, len, &func_g.search, 3788 &clear_filter); 3789 func_g.len = strlen(func_g.search); 3790 } 3791 3792 if (mod) { 3793 mod_g.type = filter_parse_regex(mod, strlen(mod), 3794 &mod_g.search, &exclude_mod); 3795 mod_g.len = strlen(mod_g.search); 3796 } 3797 3798 mutex_lock(&ftrace_lock); 3799 3800 if (unlikely(ftrace_disabled)) 3801 goto out_unlock; 3802 3803 if (func_g.type == MATCH_INDEX) { 3804 found = add_rec_by_index(hash, &func_g, clear_filter); 3805 goto out_unlock; 3806 } 3807 3808 do_for_each_ftrace_rec(pg, rec) { 3809 3810 if (rec->flags & FTRACE_FL_DISABLED) 3811 continue; 3812 3813 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { 3814 ret = enter_record(hash, rec, clear_filter); 3815 if (ret < 0) { 3816 found = ret; 3817 goto out_unlock; 3818 } 3819 found = 1; 3820 } 3821 } while_for_each_ftrace_rec(); 3822 out_unlock: 3823 mutex_unlock(&ftrace_lock); 3824 3825 return found; 3826 } 3827 3828 static int 3829 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) 3830 { 3831 return match_records(hash, buff, len, NULL); 3832 } 3833 3834 static void ftrace_ops_update_code(struct ftrace_ops *ops, 3835 struct ftrace_ops_hash *old_hash) 3836 { 3837 struct ftrace_ops *op; 3838 3839 if (!ftrace_enabled) 3840 return; 3841 3842 if (ops->flags & FTRACE_OPS_FL_ENABLED) { 3843 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); 3844 return; 3845 } 3846 3847 /* 3848 * If this is the shared global_ops filter, then we need to 3849 * check if there is another ops that shares it, is enabled. 3850 * If so, we still need to run the modify code. 3851 */ 3852 if (ops->func_hash != &global_ops.local_hash) 3853 return; 3854 3855 do_for_each_ftrace_op(op, ftrace_ops_list) { 3856 if (op->func_hash == &global_ops.local_hash && 3857 op->flags & FTRACE_OPS_FL_ENABLED) { 3858 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); 3859 /* Only need to do this once */ 3860 return; 3861 } 3862 } while_for_each_ftrace_op(op); 3863 } 3864 3865 static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, 3866 struct ftrace_hash **orig_hash, 3867 struct ftrace_hash *hash, 3868 int enable) 3869 { 3870 struct ftrace_ops_hash old_hash_ops; 3871 struct ftrace_hash *old_hash; 3872 int ret; 3873 3874 old_hash = *orig_hash; 3875 old_hash_ops.filter_hash = ops->func_hash->filter_hash; 3876 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; 3877 ret = ftrace_hash_move(ops, enable, orig_hash, hash); 3878 if (!ret) { 3879 ftrace_ops_update_code(ops, &old_hash_ops); 3880 free_ftrace_hash_rcu(old_hash); 3881 } 3882 return ret; 3883 } 3884 3885 static bool module_exists(const char *module) 3886 { 3887 /* All modules have the symbol __this_module */ 3888 static const char this_mod[] = "__this_module"; 3889 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2]; 3890 unsigned long val; 3891 int n; 3892 3893 n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod); 3894 3895 if (n > sizeof(modname) - 1) 3896 return false; 3897 3898 val = module_kallsyms_lookup_name(modname); 3899 return val != 0; 3900 } 3901 3902 static int cache_mod(struct trace_array *tr, 3903 const char *func, char *module, int enable) 3904 { 3905 struct ftrace_mod_load *ftrace_mod, *n; 3906 struct list_head *head = enable ? 
&tr->mod_trace : &tr->mod_notrace; 3907 int ret; 3908 3909 mutex_lock(&ftrace_lock); 3910 3911 /* We do not cache inverse filters */ 3912 if (func[0] == '!') { 3913 func++; 3914 ret = -EINVAL; 3915 3916 /* Look to remove this hash */ 3917 list_for_each_entry_safe(ftrace_mod, n, head, list) { 3918 if (strcmp(ftrace_mod->module, module) != 0) 3919 continue; 3920 3921 /* no func matches all */ 3922 if (strcmp(func, "*") == 0 || 3923 (ftrace_mod->func && 3924 strcmp(ftrace_mod->func, func) == 0)) { 3925 ret = 0; 3926 free_ftrace_mod(ftrace_mod); 3927 continue; 3928 } 3929 } 3930 goto out; 3931 } 3932 3933 ret = -EINVAL; 3934 /* We only care about modules that have not been loaded yet */ 3935 if (module_exists(module)) 3936 goto out; 3937 3938 /* Save this string off, and execute it when the module is loaded */ 3939 ret = ftrace_add_mod(tr, func, module, enable); 3940 out: 3941 mutex_unlock(&ftrace_lock); 3942 3943 return ret; 3944 } 3945 3946 static int 3947 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, 3948 int reset, int enable); 3949 3950 #ifdef CONFIG_MODULES 3951 static void process_mod_list(struct list_head *head, struct ftrace_ops *ops, 3952 char *mod, bool enable) 3953 { 3954 struct ftrace_mod_load *ftrace_mod, *n; 3955 struct ftrace_hash **orig_hash, *new_hash; 3956 LIST_HEAD(process_mods); 3957 char *func; 3958 int ret; 3959 3960 mutex_lock(&ops->func_hash->regex_lock); 3961 3962 if (enable) 3963 orig_hash = &ops->func_hash->filter_hash; 3964 else 3965 orig_hash = &ops->func_hash->notrace_hash; 3966 3967 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, 3968 *orig_hash); 3969 if (!new_hash) 3970 goto out; /* warn? */ 3971 3972 mutex_lock(&ftrace_lock); 3973 3974 list_for_each_entry_safe(ftrace_mod, n, head, list) { 3975 3976 if (strcmp(ftrace_mod->module, mod) != 0) 3977 continue; 3978 3979 if (ftrace_mod->func) 3980 func = kstrdup(ftrace_mod->func, GFP_KERNEL); 3981 else 3982 func = kstrdup("*", GFP_KERNEL); 3983 3984 if (!func) /* warn? 
*/ 3985 continue; 3986 3987 list_del(&ftrace_mod->list); 3988 list_add(&ftrace_mod->list, &process_mods); 3989 3990 /* Use the newly allocated func, as it may be "*" */ 3991 kfree(ftrace_mod->func); 3992 ftrace_mod->func = func; 3993 } 3994 3995 mutex_unlock(&ftrace_lock); 3996 3997 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) { 3998 3999 func = ftrace_mod->func; 4000 4001 /* Grabs ftrace_lock, which is why we have this extra step */ 4002 match_records(new_hash, func, strlen(func), mod); 4003 free_ftrace_mod(ftrace_mod); 4004 } 4005 4006 if (enable && list_empty(head)) 4007 new_hash->flags &= ~FTRACE_HASH_FL_MOD; 4008 4009 mutex_lock(&ftrace_lock); 4010 4011 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, 4012 new_hash, enable); 4013 mutex_unlock(&ftrace_lock); 4014 4015 out: 4016 mutex_unlock(&ops->func_hash->regex_lock); 4017 4018 free_ftrace_hash(new_hash); 4019 } 4020 4021 static void process_cached_mods(const char *mod_name) 4022 { 4023 struct trace_array *tr; 4024 char *mod; 4025 4026 mod = kstrdup(mod_name, GFP_KERNEL); 4027 if (!mod) 4028 return; 4029 4030 mutex_lock(&trace_types_lock); 4031 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 4032 if (!list_empty(&tr->mod_trace)) 4033 process_mod_list(&tr->mod_trace, tr->ops, mod, true); 4034 if (!list_empty(&tr->mod_notrace)) 4035 process_mod_list(&tr->mod_notrace, tr->ops, mod, false); 4036 } 4037 mutex_unlock(&trace_types_lock); 4038 4039 kfree(mod); 4040 } 4041 #endif 4042 4043 /* 4044 * We register the module command as a template to show others how 4045 * to register the a command as well. 4046 */ 4047 4048 static int 4049 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, 4050 char *func_orig, char *cmd, char *module, int enable) 4051 { 4052 char *func; 4053 int ret; 4054 4055 /* match_records() modifies func, and we need the original */ 4056 func = kstrdup(func_orig, GFP_KERNEL); 4057 if (!func) 4058 return -ENOMEM; 4059 4060 /* 4061 * cmd == 'mod' because we only registered this func 4062 * for the 'mod' ftrace_func_command. 4063 * But if you register one func with multiple commands, 4064 * you can tell which command was used by the cmd 4065 * parameter. 4066 */ 4067 ret = match_records(hash, func, strlen(func), module); 4068 kfree(func); 4069 4070 if (!ret) 4071 return cache_mod(tr, func_orig, module, enable); 4072 if (ret < 0) 4073 return ret; 4074 return 0; 4075 } 4076 4077 static struct ftrace_func_command ftrace_mod_cmd = { 4078 .name = "mod", 4079 .func = ftrace_mod_callback, 4080 }; 4081 4082 static int __init ftrace_mod_cmd_init(void) 4083 { 4084 return register_ftrace_command(&ftrace_mod_cmd); 4085 } 4086 core_initcall(ftrace_mod_cmd_init); 4087 4088 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, 4089 struct ftrace_ops *op, struct pt_regs *pt_regs) 4090 { 4091 struct ftrace_probe_ops *probe_ops; 4092 struct ftrace_func_probe *probe; 4093 4094 probe = container_of(op, struct ftrace_func_probe, ops); 4095 probe_ops = probe->probe_ops; 4096 4097 /* 4098 * Disable preemption for these calls to prevent a RCU grace 4099 * period. This syncs the hash iteration and freeing of items 4100 * on the hash. rcu_read_lock is too dangerous here. 
 */
	preempt_disable_notrace();
	probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
	preempt_enable_notrace();
}

struct ftrace_func_map {
	struct ftrace_func_entry entry;
	void *data;
};

struct ftrace_func_mapper {
	struct ftrace_hash hash;
};

/**
 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
 *
 * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
 */
struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
{
	struct ftrace_hash *hash;

	/*
	 * The mapper is simply a ftrace_hash, but since the entries
	 * in the hash are not ftrace_func_entry type, we define it
	 * as a separate structure.
	 */
	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
	return (struct ftrace_func_mapper *)hash;
}

/**
 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
 * @mapper: The mapper that has the ip maps
 * @ip: the instruction pointer to find the data for
 *
 * Returns the data mapped to @ip if found, otherwise NULL. The return
 * is actually the address of the mapper data pointer. The address is
 * returned for use cases where the data is no bigger than a long, and
 * the user can use the data pointer as its data instead of having to
 * allocate more memory for the reference.
 */
void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
				  unsigned long ip)
{
	struct ftrace_func_entry *entry;
	struct ftrace_func_map *map;

	entry = ftrace_lookup_ip(&mapper->hash, ip);
	if (!entry)
		return NULL;

	map = (struct ftrace_func_map *)entry;
	return &map->data;
}

/**
 * ftrace_func_mapper_add_ip - Map some data to an ip
 * @mapper: The mapper that has the ip maps
 * @ip: The instruction pointer address to map @data to
 * @data: The data to map to @ip
 *
 * Returns 0 on success, otherwise an error.
 */
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
			      unsigned long ip, void *data)
{
	struct ftrace_func_entry *entry;
	struct ftrace_func_map *map;

	entry = ftrace_lookup_ip(&mapper->hash, ip);
	if (entry)
		return -EBUSY;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->entry.ip = ip;
	map->data = data;

	__add_hash_entry(&mapper->hash, &map->entry);

	return 0;
}

/**
 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
 * @mapper: The mapper that has the ip maps
 * @ip: The instruction pointer address to remove the data from
 *
 * Returns the data if it is found, otherwise NULL.
 * Note, if the data pointer is used as the data itself (see
 * ftrace_func_mapper_find_ip()), then the return value may be meaningless
 * if the data pointer was set to zero.
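 *
 * Only the internal map entry is freed here; if @data points to
 * separately allocated memory, the caller is responsible for freeing it.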
4198 */ 4199 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper, 4200 unsigned long ip) 4201 { 4202 struct ftrace_func_entry *entry; 4203 struct ftrace_func_map *map; 4204 void *data; 4205 4206 entry = ftrace_lookup_ip(&mapper->hash, ip); 4207 if (!entry) 4208 return NULL; 4209 4210 map = (struct ftrace_func_map *)entry; 4211 data = map->data; 4212 4213 remove_hash_entry(&mapper->hash, entry); 4214 kfree(entry); 4215 4216 return data; 4217 } 4218 4219 /** 4220 * free_ftrace_func_mapper - free a mapping of ips and data 4221 * @mapper: The mapper that has the ip maps 4222 * @free_func: A function to be called on each data item. 4223 * 4224 * This is used to free the function mapper. The @free_func is optional 4225 * and can be used if the data needs to be freed as well. 4226 */ 4227 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, 4228 ftrace_mapper_func free_func) 4229 { 4230 struct ftrace_func_entry *entry; 4231 struct ftrace_func_map *map; 4232 struct hlist_head *hhd; 4233 int size, i; 4234 4235 if (!mapper) 4236 return; 4237 4238 if (free_func && mapper->hash.count) { 4239 size = 1 << mapper->hash.size_bits; 4240 for (i = 0; i < size; i++) { 4241 hhd = &mapper->hash.buckets[i]; 4242 hlist_for_each_entry(entry, hhd, hlist) { 4243 map = (struct ftrace_func_map *)entry; 4244 free_func(map); 4245 } 4246 } 4247 } 4248 free_ftrace_hash(&mapper->hash); 4249 } 4250 4251 static void release_probe(struct ftrace_func_probe *probe) 4252 { 4253 struct ftrace_probe_ops *probe_ops; 4254 4255 mutex_lock(&ftrace_lock); 4256 4257 WARN_ON(probe->ref <= 0); 4258 4259 /* Subtract the ref that was used to protect this instance */ 4260 probe->ref--; 4261 4262 if (!probe->ref) { 4263 probe_ops = probe->probe_ops; 4264 /* 4265 * Sending zero as ip tells probe_ops to free 4266 * the probe->data itself 4267 */ 4268 if (probe_ops->free) 4269 probe_ops->free(probe_ops, probe->tr, 0, probe->data); 4270 list_del(&probe->list); 4271 kfree(probe); 4272 } 4273 mutex_unlock(&ftrace_lock); 4274 } 4275 4276 static void acquire_probe_locked(struct ftrace_func_probe *probe) 4277 { 4278 /* 4279 * Add one ref to keep it from being freed when releasing the 4280 * ftrace_lock mutex. 4281 */ 4282 probe->ref++; 4283 } 4284 4285 int 4286 register_ftrace_function_probe(char *glob, struct trace_array *tr, 4287 struct ftrace_probe_ops *probe_ops, 4288 void *data) 4289 { 4290 struct ftrace_func_entry *entry; 4291 struct ftrace_func_probe *probe; 4292 struct ftrace_hash **orig_hash; 4293 struct ftrace_hash *old_hash; 4294 struct ftrace_hash *hash; 4295 int count = 0; 4296 int size; 4297 int ret; 4298 int i; 4299 4300 if (WARN_ON(!tr)) 4301 return -EINVAL; 4302 4303 /* We do not support '!' 
for function probes */ 4304 if (WARN_ON(glob[0] == '!')) 4305 return -EINVAL; 4306 4307 4308 mutex_lock(&ftrace_lock); 4309 /* Check if the probe_ops is already registered */ 4310 list_for_each_entry(probe, &tr->func_probes, list) { 4311 if (probe->probe_ops == probe_ops) 4312 break; 4313 } 4314 if (&probe->list == &tr->func_probes) { 4315 probe = kzalloc(sizeof(*probe), GFP_KERNEL); 4316 if (!probe) { 4317 mutex_unlock(&ftrace_lock); 4318 return -ENOMEM; 4319 } 4320 probe->probe_ops = probe_ops; 4321 probe->ops.func = function_trace_probe_call; 4322 probe->tr = tr; 4323 ftrace_ops_init(&probe->ops); 4324 list_add(&probe->list, &tr->func_probes); 4325 } 4326 4327 acquire_probe_locked(probe); 4328 4329 mutex_unlock(&ftrace_lock); 4330 4331 /* 4332 * Note, there's a small window here where the func_hash->filter_hash 4333 * may be NULL or empty. Need to be careful when reading the loop. 4334 */ 4335 mutex_lock(&probe->ops.func_hash->regex_lock); 4336 4337 orig_hash = &probe->ops.func_hash->filter_hash; 4338 old_hash = *orig_hash; 4339 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4340 4341 if (!hash) { 4342 ret = -ENOMEM; 4343 goto out; 4344 } 4345 4346 ret = ftrace_match_records(hash, glob, strlen(glob)); 4347 4348 /* Nothing found? */ 4349 if (!ret) 4350 ret = -EINVAL; 4351 4352 if (ret < 0) 4353 goto out; 4354 4355 size = 1 << hash->size_bits; 4356 for (i = 0; i < size; i++) { 4357 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 4358 if (ftrace_lookup_ip(old_hash, entry->ip)) 4359 continue; 4360 /* 4361 * The caller might want to do something special 4362 * for each function we find. We call the callback 4363 * to give the caller an opportunity to do so. 4364 */ 4365 if (probe_ops->init) { 4366 ret = probe_ops->init(probe_ops, tr, 4367 entry->ip, data, 4368 &probe->data); 4369 if (ret < 0) { 4370 if (probe_ops->free && count) 4371 probe_ops->free(probe_ops, tr, 4372 0, probe->data); 4373 probe->data = NULL; 4374 goto out; 4375 } 4376 } 4377 count++; 4378 } 4379 } 4380 4381 mutex_lock(&ftrace_lock); 4382 4383 if (!count) { 4384 /* Nothing was added?
*/ 4385 ret = -EINVAL; 4386 goto out_unlock; 4387 } 4388 4389 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, 4390 hash, 1); 4391 if (ret < 0) 4392 goto err_unlock; 4393 4394 /* One ref for each new function traced */ 4395 probe->ref += count; 4396 4397 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED)) 4398 ret = ftrace_startup(&probe->ops, 0); 4399 4400 out_unlock: 4401 mutex_unlock(&ftrace_lock); 4402 4403 if (!ret) 4404 ret = count; 4405 out: 4406 mutex_unlock(&probe->ops.func_hash->regex_lock); 4407 free_ftrace_hash(hash); 4408 4409 release_probe(probe); 4410 4411 return ret; 4412 4413 err_unlock: 4414 if (!probe_ops->free || !count) 4415 goto out_unlock; 4416 4417 /* Failed to do the move, need to call the free functions */ 4418 for (i = 0; i < size; i++) { 4419 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 4420 if (ftrace_lookup_ip(old_hash, entry->ip)) 4421 continue; 4422 probe_ops->free(probe_ops, tr, entry->ip, probe->data); 4423 } 4424 } 4425 goto out_unlock; 4426 } 4427 4428 int 4429 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, 4430 struct ftrace_probe_ops *probe_ops) 4431 { 4432 struct ftrace_ops_hash old_hash_ops; 4433 struct ftrace_func_entry *entry; 4434 struct ftrace_func_probe *probe; 4435 struct ftrace_glob func_g; 4436 struct ftrace_hash **orig_hash; 4437 struct ftrace_hash *old_hash; 4438 struct ftrace_hash *hash = NULL; 4439 struct hlist_node *tmp; 4440 struct hlist_head hhd; 4441 char str[KSYM_SYMBOL_LEN]; 4442 int count = 0; 4443 int i, ret = -ENODEV; 4444 int size; 4445 4446 if (!glob || !strlen(glob) || !strcmp(glob, "*")) 4447 func_g.search = NULL; 4448 else { 4449 int not; 4450 4451 func_g.type = filter_parse_regex(glob, strlen(glob), 4452 &func_g.search, ¬); 4453 func_g.len = strlen(func_g.search); 4454 4455 /* we do not support '!' for function probes */ 4456 if (WARN_ON(not)) 4457 return -EINVAL; 4458 } 4459 4460 mutex_lock(&ftrace_lock); 4461 /* Check if the probe_ops is already registered */ 4462 list_for_each_entry(probe, &tr->func_probes, list) { 4463 if (probe->probe_ops == probe_ops) 4464 break; 4465 } 4466 if (&probe->list == &tr->func_probes) 4467 goto err_unlock_ftrace; 4468 4469 ret = -EINVAL; 4470 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED)) 4471 goto err_unlock_ftrace; 4472 4473 acquire_probe_locked(probe); 4474 4475 mutex_unlock(&ftrace_lock); 4476 4477 mutex_lock(&probe->ops.func_hash->regex_lock); 4478 4479 orig_hash = &probe->ops.func_hash->filter_hash; 4480 old_hash = *orig_hash; 4481 4482 if (ftrace_hash_empty(old_hash)) 4483 goto out_unlock; 4484 4485 old_hash_ops.filter_hash = old_hash; 4486 /* Probes only have filters */ 4487 old_hash_ops.notrace_hash = NULL; 4488 4489 ret = -ENOMEM; 4490 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4491 if (!hash) 4492 goto out_unlock; 4493 4494 INIT_HLIST_HEAD(&hhd); 4495 4496 size = 1 << hash->size_bits; 4497 for (i = 0; i < size; i++) { 4498 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { 4499 4500 if (func_g.search) { 4501 kallsyms_lookup(entry->ip, NULL, NULL, 4502 NULL, str); 4503 if (!ftrace_match(str, &func_g)) 4504 continue; 4505 } 4506 count++; 4507 remove_hash_entry(hash, entry); 4508 hlist_add_head(&entry->hlist, &hhd); 4509 } 4510 } 4511 4512 /* Nothing found? 
*/ 4513 if (!count) { 4514 ret = -EINVAL; 4515 goto out_unlock; 4516 } 4517 4518 mutex_lock(&ftrace_lock); 4519 4520 WARN_ON(probe->ref < count); 4521 4522 probe->ref -= count; 4523 4524 if (ftrace_hash_empty(hash)) 4525 ftrace_shutdown(&probe->ops, 0); 4526 4527 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, 4528 hash, 1); 4529 4530 /* still need to update the function call sites */ 4531 if (ftrace_enabled && !ftrace_hash_empty(hash)) 4532 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, 4533 &old_hash_ops); 4534 synchronize_rcu(); 4535 4536 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { 4537 hlist_del(&entry->hlist); 4538 if (probe_ops->free) 4539 probe_ops->free(probe_ops, tr, entry->ip, probe->data); 4540 kfree(entry); 4541 } 4542 mutex_unlock(&ftrace_lock); 4543 4544 out_unlock: 4545 mutex_unlock(&probe->ops.func_hash->regex_lock); 4546 free_ftrace_hash(hash); 4547 4548 release_probe(probe); 4549 4550 return ret; 4551 4552 err_unlock_ftrace: 4553 mutex_unlock(&ftrace_lock); 4554 return ret; 4555 } 4556 4557 void clear_ftrace_function_probes(struct trace_array *tr) 4558 { 4559 struct ftrace_func_probe *probe, *n; 4560 4561 list_for_each_entry_safe(probe, n, &tr->func_probes, list) 4562 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops); 4563 } 4564 4565 static LIST_HEAD(ftrace_commands); 4566 static DEFINE_MUTEX(ftrace_cmd_mutex); 4567 4568 /* 4569 * Currently we only register ftrace commands from __init, so mark this 4570 * __init too. 4571 */ 4572 __init int register_ftrace_command(struct ftrace_func_command *cmd) 4573 { 4574 struct ftrace_func_command *p; 4575 int ret = 0; 4576 4577 mutex_lock(&ftrace_cmd_mutex); 4578 list_for_each_entry(p, &ftrace_commands, list) { 4579 if (strcmp(cmd->name, p->name) == 0) { 4580 ret = -EBUSY; 4581 goto out_unlock; 4582 } 4583 } 4584 list_add(&cmd->list, &ftrace_commands); 4585 out_unlock: 4586 mutex_unlock(&ftrace_cmd_mutex); 4587 4588 return ret; 4589 } 4590 4591 /* 4592 * Currently we only unregister ftrace commands from __init, so mark 4593 * this __init too. 
4594 */ 4595 __init int unregister_ftrace_command(struct ftrace_func_command *cmd) 4596 { 4597 struct ftrace_func_command *p, *n; 4598 int ret = -ENODEV; 4599 4600 mutex_lock(&ftrace_cmd_mutex); 4601 list_for_each_entry_safe(p, n, &ftrace_commands, list) { 4602 if (strcmp(cmd->name, p->name) == 0) { 4603 ret = 0; 4604 list_del_init(&p->list); 4605 goto out_unlock; 4606 } 4607 } 4608 out_unlock: 4609 mutex_unlock(&ftrace_cmd_mutex); 4610 4611 return ret; 4612 } 4613 4614 static int ftrace_process_regex(struct ftrace_iterator *iter, 4615 char *buff, int len, int enable) 4616 { 4617 struct ftrace_hash *hash = iter->hash; 4618 struct trace_array *tr = iter->ops->private; 4619 char *func, *command, *next = buff; 4620 struct ftrace_func_command *p; 4621 int ret = -EINVAL; 4622 4623 func = strsep(&next, ":"); 4624 4625 if (!next) { 4626 ret = ftrace_match_records(hash, func, len); 4627 if (!ret) 4628 ret = -EINVAL; 4629 if (ret < 0) 4630 return ret; 4631 return 0; 4632 } 4633 4634 /* command found */ 4635 4636 command = strsep(&next, ":"); 4637 4638 mutex_lock(&ftrace_cmd_mutex); 4639 list_for_each_entry(p, &ftrace_commands, list) { 4640 if (strcmp(p->name, command) == 0) { 4641 ret = p->func(tr, hash, func, command, next, enable); 4642 goto out_unlock; 4643 } 4644 } 4645 out_unlock: 4646 mutex_unlock(&ftrace_cmd_mutex); 4647 4648 return ret; 4649 } 4650 4651 static ssize_t 4652 ftrace_regex_write(struct file *file, const char __user *ubuf, 4653 size_t cnt, loff_t *ppos, int enable) 4654 { 4655 struct ftrace_iterator *iter; 4656 struct trace_parser *parser; 4657 ssize_t ret, read; 4658 4659 if (!cnt) 4660 return 0; 4661 4662 if (file->f_mode & FMODE_READ) { 4663 struct seq_file *m = file->private_data; 4664 iter = m->private; 4665 } else 4666 iter = file->private_data; 4667 4668 if (unlikely(ftrace_disabled)) 4669 return -ENODEV; 4670 4671 /* iter->hash is a local copy, so we don't need regex_lock */ 4672 4673 parser = &iter->parser; 4674 read = trace_get_user(parser, ubuf, cnt, ppos); 4675 4676 if (read >= 0 && trace_parser_loaded(parser) && 4677 !trace_parser_cont(parser)) { 4678 ret = ftrace_process_regex(iter, parser->buffer, 4679 parser->idx, enable); 4680 trace_parser_clear(parser); 4681 if (ret < 0) 4682 goto out; 4683 } 4684 4685 ret = read; 4686 out: 4687 return ret; 4688 } 4689 4690 ssize_t 4691 ftrace_filter_write(struct file *file, const char __user *ubuf, 4692 size_t cnt, loff_t *ppos) 4693 { 4694 return ftrace_regex_write(file, ubuf, cnt, ppos, 1); 4695 } 4696 4697 ssize_t 4698 ftrace_notrace_write(struct file *file, const char __user *ubuf, 4699 size_t cnt, loff_t *ppos) 4700 { 4701 return ftrace_regex_write(file, ubuf, cnt, ppos, 0); 4702 } 4703 4704 static int 4705 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) 4706 { 4707 struct ftrace_func_entry *entry; 4708 4709 if (!ftrace_location(ip)) 4710 return -EINVAL; 4711 4712 if (remove) { 4713 entry = ftrace_lookup_ip(hash, ip); 4714 if (!entry) 4715 return -ENOENT; 4716 free_hash_entry(hash, entry); 4717 return 0; 4718 } 4719 4720 return add_hash_entry(hash, ip); 4721 } 4722 4723 static int 4724 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, 4725 unsigned long ip, int remove, int reset, int enable) 4726 { 4727 struct ftrace_hash **orig_hash; 4728 struct ftrace_hash *hash; 4729 int ret; 4730 4731 if (unlikely(ftrace_disabled)) 4732 return -ENODEV; 4733 4734 mutex_lock(&ops->func_hash->regex_lock); 4735 4736 if (enable) 4737 orig_hash = &ops->func_hash->filter_hash; 4738 else 4739 
orig_hash = &ops->func_hash->notrace_hash; 4740 4741 if (reset) 4742 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 4743 else 4744 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 4745 4746 if (!hash) { 4747 ret = -ENOMEM; 4748 goto out_regex_unlock; 4749 } 4750 4751 if (buf && !ftrace_match_records(hash, buf, len)) { 4752 ret = -EINVAL; 4753 goto out_regex_unlock; 4754 } 4755 if (ip) { 4756 ret = ftrace_match_addr(hash, ip, remove); 4757 if (ret < 0) 4758 goto out_regex_unlock; 4759 } 4760 4761 mutex_lock(&ftrace_lock); 4762 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); 4763 mutex_unlock(&ftrace_lock); 4764 4765 out_regex_unlock: 4766 mutex_unlock(&ops->func_hash->regex_lock); 4767 4768 free_ftrace_hash(hash); 4769 return ret; 4770 } 4771 4772 static int 4773 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, 4774 int reset, int enable) 4775 { 4776 return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable); 4777 } 4778 4779 /** 4780 * ftrace_set_filter_ip - set a function to filter on in ftrace by address 4781 * @ops - the ops to set the filter with 4782 * @ip - the address to add to or remove from the filter. 4783 * @remove - non zero to remove the ip from the filter 4784 * @reset - non zero to reset all filters before applying this filter. 4785 * 4786 * Filters denote which functions should be enabled when tracing is enabled. 4787 * If @ip is NULL, it fails to update the filter. 4788 */ 4789 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, 4790 int remove, int reset) 4791 { 4792 ftrace_ops_init(ops); 4793 return ftrace_set_addr(ops, ip, remove, reset, 1); 4794 } 4795 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); 4796 4797 /** 4798 * ftrace_ops_set_global_filter - set up ops to use global filters 4799 * @ops - the ops which will use the global filters 4800 * 4801 * ftrace users who need global function trace filtering should call this. 4802 * It can set the global filter only if ops were not initialized before. 4803 */ 4804 void ftrace_ops_set_global_filter(struct ftrace_ops *ops) 4805 { 4806 if (ops->flags & FTRACE_OPS_FL_INITIALIZED) 4807 return; 4808 4809 ftrace_ops_init(ops); 4810 ops->func_hash = &global_ops.local_hash; 4811 } 4812 EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter); 4813 4814 static int 4815 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, 4816 int reset, int enable) 4817 { 4818 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable); 4819 } 4820 4821 /** 4822 * ftrace_set_filter - set a function to filter on in ftrace 4823 * @ops - the ops to set the filter with 4824 * @buf - the string that holds the function filter text. 4825 * @len - the length of the string. 4826 * @reset - non zero to reset all filters before applying this filter. 4827 * 4828 * Filters denote which functions should be enabled when tracing is enabled. 4829 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 4830 */ 4831 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, 4832 int len, int reset) 4833 { 4834 ftrace_ops_init(ops); 4835 return ftrace_set_regex(ops, buf, len, reset, 1); 4836 } 4837 EXPORT_SYMBOL_GPL(ftrace_set_filter); 4838 4839 /** 4840 * ftrace_set_notrace - set a function to not trace in ftrace 4841 * @ops - the ops to set the notrace filter with 4842 * @buf - the string that holds the function notrace text. 4843 * @len - the length of the string. 4844 * @reset - non zero to reset all filters before applying this filter.
4845 * 4846 * Notrace Filters denote which functions should not be enabled when tracing 4847 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 4848 * for tracing. 4849 */ 4850 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, 4851 int len, int reset) 4852 { 4853 ftrace_ops_init(ops); 4854 return ftrace_set_regex(ops, buf, len, reset, 0); 4855 } 4856 EXPORT_SYMBOL_GPL(ftrace_set_notrace); 4857 /** 4858 * ftrace_set_global_filter - set a function to filter on with global tracers 4859 * @buf - the string that holds the function filter text. 4860 * @len - the length of the string. 4861 * @reset - non zero to reset all filters before applying this filter. 4862 * 4863 * Filters denote which functions should be enabled when tracing is enabled. 4864 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 4865 */ 4866 void ftrace_set_global_filter(unsigned char *buf, int len, int reset) 4867 { 4868 ftrace_set_regex(&global_ops, buf, len, reset, 1); 4869 } 4870 EXPORT_SYMBOL_GPL(ftrace_set_global_filter); 4871 4872 /** 4873 * ftrace_set_global_notrace - set a function to not trace with global tracers 4874 * @buf - the string that holds the function notrace text. 4875 * @len - the length of the string. 4876 * @reset - non zero to reset all filters before applying this filter. 4877 * 4878 * Notrace Filters denote which functions should not be enabled when tracing 4879 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 4880 * for tracing. 4881 */ 4882 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) 4883 { 4884 ftrace_set_regex(&global_ops, buf, len, reset, 0); 4885 } 4886 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); 4887 4888 /* 4889 * command line interface to allow users to set filters on boot up. 
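 *
 * For example, a command line containing (function names are only
 * illustrative):
 *
 *	ftrace_filter=kmem_cache_alloc,kmem_cache_free ftrace_notrace=*rcu*
 *
 * pre-loads the global filter and notrace hashes early in boot.  The graph
 * tracer has matching ftrace_graph_filter= and ftrace_graph_notrace=
 * parameters, and ftrace_graph_max_depth= limits the graph depth; all are
 * handled below.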
4890 */ 4891 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE 4892 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 4893 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; 4894 4895 /* Used by function selftest to not test if filter is set */ 4896 bool ftrace_filter_param __initdata; 4897 4898 static int __init set_ftrace_notrace(char *str) 4899 { 4900 ftrace_filter_param = true; 4901 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); 4902 return 1; 4903 } 4904 __setup("ftrace_notrace=", set_ftrace_notrace); 4905 4906 static int __init set_ftrace_filter(char *str) 4907 { 4908 ftrace_filter_param = true; 4909 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); 4910 return 1; 4911 } 4912 __setup("ftrace_filter=", set_ftrace_filter); 4913 4914 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 4915 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; 4916 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 4917 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); 4918 4919 static int __init set_graph_function(char *str) 4920 { 4921 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); 4922 return 1; 4923 } 4924 __setup("ftrace_graph_filter=", set_graph_function); 4925 4926 static int __init set_graph_notrace_function(char *str) 4927 { 4928 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE); 4929 return 1; 4930 } 4931 __setup("ftrace_graph_notrace=", set_graph_notrace_function); 4932 4933 static int __init set_graph_max_depth_function(char *str) 4934 { 4935 if (!str) 4936 return 0; 4937 fgraph_max_depth = simple_strtoul(str, NULL, 0); 4938 return 1; 4939 } 4940 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function); 4941 4942 static void __init set_ftrace_early_graph(char *buf, int enable) 4943 { 4944 int ret; 4945 char *func; 4946 struct ftrace_hash *hash; 4947 4948 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 4949 if (WARN_ON(!hash)) 4950 return; 4951 4952 while (buf) { 4953 func = strsep(&buf, ","); 4954 /* we allow only one expression at a time */ 4955 ret = ftrace_graph_set_hash(hash, func); 4956 if (ret) 4957 printk(KERN_DEBUG "ftrace: function %s not " 4958 "traceable\n", func); 4959 } 4960 4961 if (enable) 4962 ftrace_graph_hash = hash; 4963 else 4964 ftrace_graph_notrace_hash = hash; 4965 } 4966 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 4967 4968 void __init 4969 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable) 4970 { 4971 char *func; 4972 4973 ftrace_ops_init(ops); 4974 4975 while (buf) { 4976 func = strsep(&buf, ","); 4977 ftrace_set_regex(ops, func, strlen(func), 0, enable); 4978 } 4979 } 4980 4981 static void __init set_ftrace_early_filters(void) 4982 { 4983 if (ftrace_filter_buf[0]) 4984 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1); 4985 if (ftrace_notrace_buf[0]) 4986 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0); 4987 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 4988 if (ftrace_graph_buf[0]) 4989 set_ftrace_early_graph(ftrace_graph_buf, 1); 4990 if (ftrace_graph_notrace_buf[0]) 4991 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0); 4992 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 4993 } 4994 4995 int ftrace_regex_release(struct inode *inode, struct file *file) 4996 { 4997 struct seq_file *m = (struct seq_file *)file->private_data; 4998 struct ftrace_iterator *iter; 4999 struct ftrace_hash **orig_hash; 5000 struct trace_parser *parser; 5001 int filter_hash; 5002 int ret; 5003 5004 if (file->f_mode & FMODE_READ) { 5005 iter = m->private; 5006 seq_release(inode, 
file); 5007 } else 5008 iter = file->private_data; 5009 5010 parser = &iter->parser; 5011 if (trace_parser_loaded(parser)) { 5012 ftrace_match_records(iter->hash, parser->buffer, parser->idx); 5013 } 5014 5015 trace_parser_put(parser); 5016 5017 mutex_lock(&iter->ops->func_hash->regex_lock); 5018 5019 if (file->f_mode & FMODE_WRITE) { 5020 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); 5021 5022 if (filter_hash) { 5023 orig_hash = &iter->ops->func_hash->filter_hash; 5024 if (iter->tr && !list_empty(&iter->tr->mod_trace)) 5025 iter->hash->flags |= FTRACE_HASH_FL_MOD; 5026 } else 5027 orig_hash = &iter->ops->func_hash->notrace_hash; 5028 5029 mutex_lock(&ftrace_lock); 5030 ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash, 5031 iter->hash, filter_hash); 5032 mutex_unlock(&ftrace_lock); 5033 } else { 5034 /* For read only, the hash is the ops hash */ 5035 iter->hash = NULL; 5036 } 5037 5038 mutex_unlock(&iter->ops->func_hash->regex_lock); 5039 free_ftrace_hash(iter->hash); 5040 kfree(iter); 5041 5042 return 0; 5043 } 5044 5045 static const struct file_operations ftrace_avail_fops = { 5046 .open = ftrace_avail_open, 5047 .read = seq_read, 5048 .llseek = seq_lseek, 5049 .release = seq_release_private, 5050 }; 5051 5052 static const struct file_operations ftrace_enabled_fops = { 5053 .open = ftrace_enabled_open, 5054 .read = seq_read, 5055 .llseek = seq_lseek, 5056 .release = seq_release_private, 5057 }; 5058 5059 static const struct file_operations ftrace_filter_fops = { 5060 .open = ftrace_filter_open, 5061 .read = seq_read, 5062 .write = ftrace_filter_write, 5063 .llseek = tracing_lseek, 5064 .release = ftrace_regex_release, 5065 }; 5066 5067 static const struct file_operations ftrace_notrace_fops = { 5068 .open = ftrace_notrace_open, 5069 .read = seq_read, 5070 .write = ftrace_notrace_write, 5071 .llseek = tracing_lseek, 5072 .release = ftrace_regex_release, 5073 }; 5074 5075 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 5076 5077 static DEFINE_MUTEX(graph_lock); 5078 5079 struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH; 5080 struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH; 5081 5082 enum graph_filter_type { 5083 GRAPH_FILTER_NOTRACE = 0, 5084 GRAPH_FILTER_FUNCTION, 5085 }; 5086 5087 #define FTRACE_GRAPH_EMPTY ((void *)1) 5088 5089 struct ftrace_graph_data { 5090 struct ftrace_hash *hash; 5091 struct ftrace_func_entry *entry; 5092 int idx; /* for hash table iteration */ 5093 enum graph_filter_type type; 5094 struct ftrace_hash *new_hash; 5095 const struct seq_operations *seq_ops; 5096 struct trace_parser parser; 5097 }; 5098 5099 static void * 5100 __g_next(struct seq_file *m, loff_t *pos) 5101 { 5102 struct ftrace_graph_data *fgd = m->private; 5103 struct ftrace_func_entry *entry = fgd->entry; 5104 struct hlist_head *head; 5105 int i, idx = fgd->idx; 5106 5107 if (*pos >= fgd->hash->count) 5108 return NULL; 5109 5110 if (entry) { 5111 hlist_for_each_entry_continue(entry, hlist) { 5112 fgd->entry = entry; 5113 return entry; 5114 } 5115 5116 idx++; 5117 } 5118 5119 for (i = idx; i < 1 << fgd->hash->size_bits; i++) { 5120 head = &fgd->hash->buckets[i]; 5121 hlist_for_each_entry(entry, head, hlist) { 5122 fgd->entry = entry; 5123 fgd->idx = i; 5124 return entry; 5125 } 5126 } 5127 return NULL; 5128 } 5129 5130 static void * 5131 g_next(struct seq_file *m, void *v, loff_t *pos) 5132 { 5133 (*pos)++; 5134 return __g_next(m, pos); 5135 } 5136 5137 static void *g_start(struct seq_file *m, loff_t *pos) 5138 { 5139 struct ftrace_graph_data *fgd = m->private; 5140 5141 
mutex_lock(&graph_lock); 5142 5143 if (fgd->type == GRAPH_FILTER_FUNCTION) 5144 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, 5145 lockdep_is_held(&graph_lock)); 5146 else 5147 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 5148 lockdep_is_held(&graph_lock)); 5149 5150 /* Nothing, tell g_show to print all functions are enabled */ 5151 if (ftrace_hash_empty(fgd->hash) && !*pos) 5152 return FTRACE_GRAPH_EMPTY; 5153 5154 fgd->idx = 0; 5155 fgd->entry = NULL; 5156 return __g_next(m, pos); 5157 } 5158 5159 static void g_stop(struct seq_file *m, void *p) 5160 { 5161 mutex_unlock(&graph_lock); 5162 } 5163 5164 static int g_show(struct seq_file *m, void *v) 5165 { 5166 struct ftrace_func_entry *entry = v; 5167 5168 if (!entry) 5169 return 0; 5170 5171 if (entry == FTRACE_GRAPH_EMPTY) { 5172 struct ftrace_graph_data *fgd = m->private; 5173 5174 if (fgd->type == GRAPH_FILTER_FUNCTION) 5175 seq_puts(m, "#### all functions enabled ####\n"); 5176 else 5177 seq_puts(m, "#### no functions disabled ####\n"); 5178 return 0; 5179 } 5180 5181 seq_printf(m, "%ps\n", (void *)entry->ip); 5182 5183 return 0; 5184 } 5185 5186 static const struct seq_operations ftrace_graph_seq_ops = { 5187 .start = g_start, 5188 .next = g_next, 5189 .stop = g_stop, 5190 .show = g_show, 5191 }; 5192 5193 static int 5194 __ftrace_graph_open(struct inode *inode, struct file *file, 5195 struct ftrace_graph_data *fgd) 5196 { 5197 int ret = 0; 5198 struct ftrace_hash *new_hash = NULL; 5199 5200 if (file->f_mode & FMODE_WRITE) { 5201 const int size_bits = FTRACE_HASH_DEFAULT_BITS; 5202 5203 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX)) 5204 return -ENOMEM; 5205 5206 if (file->f_flags & O_TRUNC) 5207 new_hash = alloc_ftrace_hash(size_bits); 5208 else 5209 new_hash = alloc_and_copy_ftrace_hash(size_bits, 5210 fgd->hash); 5211 if (!new_hash) { 5212 ret = -ENOMEM; 5213 goto out; 5214 } 5215 } 5216 5217 if (file->f_mode & FMODE_READ) { 5218 ret = seq_open(file, &ftrace_graph_seq_ops); 5219 if (!ret) { 5220 struct seq_file *m = file->private_data; 5221 m->private = fgd; 5222 } else { 5223 /* Failed */ 5224 free_ftrace_hash(new_hash); 5225 new_hash = NULL; 5226 } 5227 } else 5228 file->private_data = fgd; 5229 5230 out: 5231 if (ret < 0 && file->f_mode & FMODE_WRITE) 5232 trace_parser_put(&fgd->parser); 5233 5234 fgd->new_hash = new_hash; 5235 5236 /* 5237 * All uses of fgd->hash must be taken with the graph_lock 5238 * held. The graph_lock is going to be released, so force 5239 * fgd->hash to be reinitialized when it is taken again. 
5240 */ 5241 fgd->hash = NULL; 5242 5243 return ret; 5244 } 5245 5246 static int 5247 ftrace_graph_open(struct inode *inode, struct file *file) 5248 { 5249 struct ftrace_graph_data *fgd; 5250 int ret; 5251 5252 if (unlikely(ftrace_disabled)) 5253 return -ENODEV; 5254 5255 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 5256 if (fgd == NULL) 5257 return -ENOMEM; 5258 5259 mutex_lock(&graph_lock); 5260 5261 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, 5262 lockdep_is_held(&graph_lock)); 5263 fgd->type = GRAPH_FILTER_FUNCTION; 5264 fgd->seq_ops = &ftrace_graph_seq_ops; 5265 5266 ret = __ftrace_graph_open(inode, file, fgd); 5267 if (ret < 0) 5268 kfree(fgd); 5269 5270 mutex_unlock(&graph_lock); 5271 return ret; 5272 } 5273 5274 static int 5275 ftrace_graph_notrace_open(struct inode *inode, struct file *file) 5276 { 5277 struct ftrace_graph_data *fgd; 5278 int ret; 5279 5280 if (unlikely(ftrace_disabled)) 5281 return -ENODEV; 5282 5283 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 5284 if (fgd == NULL) 5285 return -ENOMEM; 5286 5287 mutex_lock(&graph_lock); 5288 5289 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 5290 lockdep_is_held(&graph_lock)); 5291 fgd->type = GRAPH_FILTER_NOTRACE; 5292 fgd->seq_ops = &ftrace_graph_seq_ops; 5293 5294 ret = __ftrace_graph_open(inode, file, fgd); 5295 if (ret < 0) 5296 kfree(fgd); 5297 5298 mutex_unlock(&graph_lock); 5299 return ret; 5300 } 5301 5302 static int 5303 ftrace_graph_release(struct inode *inode, struct file *file) 5304 { 5305 struct ftrace_graph_data *fgd; 5306 struct ftrace_hash *old_hash, *new_hash; 5307 struct trace_parser *parser; 5308 int ret = 0; 5309 5310 if (file->f_mode & FMODE_READ) { 5311 struct seq_file *m = file->private_data; 5312 5313 fgd = m->private; 5314 seq_release(inode, file); 5315 } else { 5316 fgd = file->private_data; 5317 } 5318 5319 5320 if (file->f_mode & FMODE_WRITE) { 5321 5322 parser = &fgd->parser; 5323 5324 if (trace_parser_loaded((parser))) { 5325 ret = ftrace_graph_set_hash(fgd->new_hash, 5326 parser->buffer); 5327 } 5328 5329 trace_parser_put(parser); 5330 5331 new_hash = __ftrace_hash_move(fgd->new_hash); 5332 if (!new_hash) { 5333 ret = -ENOMEM; 5334 goto out; 5335 } 5336 5337 mutex_lock(&graph_lock); 5338 5339 if (fgd->type == GRAPH_FILTER_FUNCTION) { 5340 old_hash = rcu_dereference_protected(ftrace_graph_hash, 5341 lockdep_is_held(&graph_lock)); 5342 rcu_assign_pointer(ftrace_graph_hash, new_hash); 5343 } else { 5344 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 5345 lockdep_is_held(&graph_lock)); 5346 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash); 5347 } 5348 5349 mutex_unlock(&graph_lock); 5350 5351 /* Wait till all users are no longer using the old hash */ 5352 synchronize_rcu(); 5353 5354 free_ftrace_hash(old_hash); 5355 } 5356 5357 out: 5358 free_ftrace_hash(fgd->new_hash); 5359 kfree(fgd); 5360 5361 return ret; 5362 } 5363 5364 static int 5365 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer) 5366 { 5367 struct ftrace_glob func_g; 5368 struct dyn_ftrace *rec; 5369 struct ftrace_page *pg; 5370 struct ftrace_func_entry *entry; 5371 int fail = 1; 5372 int not; 5373 5374 /* decode regex */ 5375 func_g.type = filter_parse_regex(buffer, strlen(buffer), 5376 &func_g.search, ¬); 5377 5378 func_g.len = strlen(func_g.search); 5379 5380 mutex_lock(&ftrace_lock); 5381 5382 if (unlikely(ftrace_disabled)) { 5383 mutex_unlock(&ftrace_lock); 5384 return -ENODEV; 5385 } 5386 5387 do_for_each_ftrace_rec(pg, rec) { 5388 5389 if (rec->flags & FTRACE_FL_DISABLED) 
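/* Records still marked DISABLED (e.g. from a module still loading) are skipped */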
5390 continue; 5391 5392 if (ftrace_match_record(rec, &func_g, NULL, 0)) { 5393 entry = ftrace_lookup_ip(hash, rec->ip); 5394 5395 if (!not) { 5396 fail = 0; 5397 5398 if (entry) 5399 continue; 5400 if (add_hash_entry(hash, rec->ip) < 0) 5401 goto out; 5402 } else { 5403 if (entry) { 5404 free_hash_entry(hash, entry); 5405 fail = 0; 5406 } 5407 } 5408 } 5409 } while_for_each_ftrace_rec(); 5410 out: 5411 mutex_unlock(&ftrace_lock); 5412 5413 if (fail) 5414 return -EINVAL; 5415 5416 return 0; 5417 } 5418 5419 static ssize_t 5420 ftrace_graph_write(struct file *file, const char __user *ubuf, 5421 size_t cnt, loff_t *ppos) 5422 { 5423 ssize_t read, ret = 0; 5424 struct ftrace_graph_data *fgd = file->private_data; 5425 struct trace_parser *parser; 5426 5427 if (!cnt) 5428 return 0; 5429 5430 /* Read mode uses seq functions */ 5431 if (file->f_mode & FMODE_READ) { 5432 struct seq_file *m = file->private_data; 5433 fgd = m->private; 5434 } 5435 5436 parser = &fgd->parser; 5437 5438 read = trace_get_user(parser, ubuf, cnt, ppos); 5439 5440 if (read >= 0 && trace_parser_loaded(parser) && 5441 !trace_parser_cont(parser)) { 5442 5443 ret = ftrace_graph_set_hash(fgd->new_hash, 5444 parser->buffer); 5445 trace_parser_clear(parser); 5446 } 5447 5448 if (!ret) 5449 ret = read; 5450 5451 return ret; 5452 } 5453 5454 static const struct file_operations ftrace_graph_fops = { 5455 .open = ftrace_graph_open, 5456 .read = seq_read, 5457 .write = ftrace_graph_write, 5458 .llseek = tracing_lseek, 5459 .release = ftrace_graph_release, 5460 }; 5461 5462 static const struct file_operations ftrace_graph_notrace_fops = { 5463 .open = ftrace_graph_notrace_open, 5464 .read = seq_read, 5465 .write = ftrace_graph_write, 5466 .llseek = tracing_lseek, 5467 .release = ftrace_graph_release, 5468 }; 5469 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 5470 5471 void ftrace_create_filter_files(struct ftrace_ops *ops, 5472 struct dentry *parent) 5473 { 5474 5475 trace_create_file("set_ftrace_filter", 0644, parent, 5476 ops, &ftrace_filter_fops); 5477 5478 trace_create_file("set_ftrace_notrace", 0644, parent, 5479 ops, &ftrace_notrace_fops); 5480 } 5481 5482 /* 5483 * The name "destroy_filter_files" is really a misnomer. Although 5484 * in the future, it may actually delete the files, but this is 5485 * really intended to make sure the ops passed in are disabled 5486 * and that when this function returns, the caller is free to 5487 * free the ops. 5488 * 5489 * The "destroy" name is only to match the "create" name that this 5490 * should be paired with. 
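 *
 * A typical pairing is ftrace_create_filter_files() when the tracer's
 * directory is created, and ftrace_destroy_filter_files() right before
 * the ops is freed.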
5491 */ 5492 void ftrace_destroy_filter_files(struct ftrace_ops *ops) 5493 { 5494 mutex_lock(&ftrace_lock); 5495 if (ops->flags & FTRACE_OPS_FL_ENABLED) 5496 ftrace_shutdown(ops, 0); 5497 ops->flags |= FTRACE_OPS_FL_DELETED; 5498 ftrace_free_filter(ops); 5499 mutex_unlock(&ftrace_lock); 5500 } 5501 5502 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer) 5503 { 5504 5505 trace_create_file("available_filter_functions", 0444, 5506 d_tracer, NULL, &ftrace_avail_fops); 5507 5508 trace_create_file("enabled_functions", 0444, 5509 d_tracer, NULL, &ftrace_enabled_fops); 5510 5511 ftrace_create_filter_files(&global_ops, d_tracer); 5512 5513 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 5514 trace_create_file("set_graph_function", 0644, d_tracer, 5515 NULL, 5516 &ftrace_graph_fops); 5517 trace_create_file("set_graph_notrace", 0644, d_tracer, 5518 NULL, 5519 &ftrace_graph_notrace_fops); 5520 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 5521 5522 return 0; 5523 } 5524 5525 static int ftrace_cmp_ips(const void *a, const void *b) 5526 { 5527 const unsigned long *ipa = a; 5528 const unsigned long *ipb = b; 5529 5530 if (*ipa > *ipb) 5531 return 1; 5532 if (*ipa < *ipb) 5533 return -1; 5534 return 0; 5535 } 5536 5537 static int ftrace_process_locs(struct module *mod, 5538 unsigned long *start, 5539 unsigned long *end) 5540 { 5541 struct ftrace_page *start_pg; 5542 struct ftrace_page *pg; 5543 struct dyn_ftrace *rec; 5544 unsigned long count; 5545 unsigned long *p; 5546 unsigned long addr; 5547 unsigned long flags = 0; /* Shut up gcc */ 5548 int ret = -ENOMEM; 5549 5550 count = end - start; 5551 5552 if (!count) 5553 return 0; 5554 5555 sort(start, count, sizeof(*start), 5556 ftrace_cmp_ips, NULL); 5557 5558 start_pg = ftrace_allocate_pages(count); 5559 if (!start_pg) 5560 return -ENOMEM; 5561 5562 mutex_lock(&ftrace_lock); 5563 5564 /* 5565 * Core and each module needs their own pages, as 5566 * modules will free them when they are removed. 5567 * Force a new page to be allocated for modules. 5568 */ 5569 if (!mod) { 5570 WARN_ON(ftrace_pages || ftrace_pages_start); 5571 /* First initialization */ 5572 ftrace_pages = ftrace_pages_start = start_pg; 5573 } else { 5574 if (!ftrace_pages) 5575 goto out; 5576 5577 if (WARN_ON(ftrace_pages->next)) { 5578 /* Hmm, we have free pages? */ 5579 while (ftrace_pages->next) 5580 ftrace_pages = ftrace_pages->next; 5581 } 5582 5583 ftrace_pages->next = start_pg; 5584 } 5585 5586 p = start; 5587 pg = start_pg; 5588 while (p < end) { 5589 addr = ftrace_call_adjust(*p++); 5590 /* 5591 * Some architecture linkers will pad between 5592 * the different mcount_loc sections of different 5593 * object files to satisfy alignments. 5594 * Skip any NULL pointers. 5595 */ 5596 if (!addr) 5597 continue; 5598 5599 if (pg->index == pg->size) { 5600 /* We should have allocated enough */ 5601 if (WARN_ON(!pg->next)) 5602 break; 5603 pg = pg->next; 5604 } 5605 5606 rec = &pg->records[pg->index++]; 5607 rec->ip = addr; 5608 } 5609 5610 /* We should have used all pages */ 5611 WARN_ON(pg->next); 5612 5613 /* Assign the last page to ftrace_pages */ 5614 ftrace_pages = pg; 5615 5616 /* 5617 * We only need to disable interrupts on start up 5618 * because we are modifying code that an interrupt 5619 * may execute, and the modification is not atomic. 5620 * But for modules, nothing runs the code we modify 5621 * until we are finished with it, and there's no 5622 * reason to cause large interrupt latencies while we do it. 
5623 */ 5624 if (!mod) 5625 local_irq_save(flags); 5626 ftrace_update_code(mod, start_pg); 5627 if (!mod) 5628 local_irq_restore(flags); 5629 ret = 0; 5630 out: 5631 mutex_unlock(&ftrace_lock); 5632 5633 return ret; 5634 } 5635 5636 struct ftrace_mod_func { 5637 struct list_head list; 5638 char *name; 5639 unsigned long ip; 5640 unsigned int size; 5641 }; 5642 5643 struct ftrace_mod_map { 5644 struct rcu_head rcu; 5645 struct list_head list; 5646 struct module *mod; 5647 unsigned long start_addr; 5648 unsigned long end_addr; 5649 struct list_head funcs; 5650 unsigned int num_funcs; 5651 }; 5652 5653 #ifdef CONFIG_MODULES 5654 5655 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) 5656 5657 static LIST_HEAD(ftrace_mod_maps); 5658 5659 static int referenced_filters(struct dyn_ftrace *rec) 5660 { 5661 struct ftrace_ops *ops; 5662 int cnt = 0; 5663 5664 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { 5665 if (ops_references_rec(ops, rec)) 5666 cnt++; 5667 } 5668 5669 return cnt; 5670 } 5671 5672 static void 5673 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash) 5674 { 5675 struct ftrace_func_entry *entry; 5676 struct dyn_ftrace *rec; 5677 int i; 5678 5679 if (ftrace_hash_empty(hash)) 5680 return; 5681 5682 for (i = 0; i < pg->index; i++) { 5683 rec = &pg->records[i]; 5684 entry = __ftrace_lookup_ip(hash, rec->ip); 5685 /* 5686 * Do not allow this rec to match again. 5687 * Yeah, it may waste some memory, but will be removed 5688 * if/when the hash is modified again. 5689 */ 5690 if (entry) 5691 entry->ip = 0; 5692 } 5693 } 5694 5695 /* Clear any records from hashes */ 5696 static void clear_mod_from_hashes(struct ftrace_page *pg) 5697 { 5698 struct trace_array *tr; 5699 5700 mutex_lock(&trace_types_lock); 5701 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 5702 if (!tr->ops || !tr->ops->func_hash) 5703 continue; 5704 mutex_lock(&tr->ops->func_hash->regex_lock); 5705 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash); 5706 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash); 5707 mutex_unlock(&tr->ops->func_hash->regex_lock); 5708 } 5709 mutex_unlock(&trace_types_lock); 5710 } 5711 5712 static void ftrace_free_mod_map(struct rcu_head *rcu) 5713 { 5714 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu); 5715 struct ftrace_mod_func *mod_func; 5716 struct ftrace_mod_func *n; 5717 5718 /* All the contents of mod_map are no longer visible to readers */ 5719 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) { 5720 kfree(mod_func->name); 5721 list_del(&mod_func->list); 5722 kfree(mod_func); 5723 } 5724 5725 kfree(mod_map); 5726 } 5727 5728 void ftrace_release_mod(struct module *mod) 5729 { 5730 struct ftrace_mod_map *mod_map; 5731 struct ftrace_mod_map *n; 5732 struct dyn_ftrace *rec; 5733 struct ftrace_page **last_pg; 5734 struct ftrace_page *tmp_page = NULL; 5735 struct ftrace_page *pg; 5736 int order; 5737 5738 mutex_lock(&ftrace_lock); 5739 5740 if (ftrace_disabled) 5741 goto out_unlock; 5742 5743 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { 5744 if (mod_map->mod == mod) { 5745 list_del_rcu(&mod_map->list); 5746 call_rcu(&mod_map->rcu, ftrace_free_mod_map); 5747 break; 5748 } 5749 } 5750 5751 /* 5752 * Each module has its own ftrace_pages, remove 5753 * them from the list.
5754 */ 5755 last_pg = &ftrace_pages_start; 5756 for (pg = ftrace_pages_start; pg; pg = *last_pg) { 5757 rec = &pg->records[0]; 5758 if (within_module_core(rec->ip, mod) || 5759 within_module_init(rec->ip, mod)) { 5760 /* 5761 * As core pages are first, the first 5762 * page should never be a module page. 5763 */ 5764 if (WARN_ON(pg == ftrace_pages_start)) 5765 goto out_unlock; 5766 5767 /* Check if we are deleting the last page */ 5768 if (pg == ftrace_pages) 5769 ftrace_pages = next_to_ftrace_page(last_pg); 5770 5771 ftrace_update_tot_cnt -= pg->index; 5772 *last_pg = pg->next; 5773 5774 pg->next = tmp_page; 5775 tmp_page = pg; 5776 } else 5777 last_pg = &pg->next; 5778 } 5779 out_unlock: 5780 mutex_unlock(&ftrace_lock); 5781 5782 for (pg = tmp_page; pg; pg = tmp_page) { 5783 5784 /* Needs to be called outside of ftrace_lock */ 5785 clear_mod_from_hashes(pg); 5786 5787 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 5788 free_pages((unsigned long)pg->records, order); 5789 tmp_page = pg->next; 5790 kfree(pg); 5791 } 5792 } 5793 5794 void ftrace_module_enable(struct module *mod) 5795 { 5796 struct dyn_ftrace *rec; 5797 struct ftrace_page *pg; 5798 5799 mutex_lock(&ftrace_lock); 5800 5801 if (ftrace_disabled) 5802 goto out_unlock; 5803 5804 /* 5805 * If the tracing is enabled, go ahead and enable the record. 5806 * 5807 * The reason not to enable the record immediately is the 5808 * inherent check of ftrace_make_nop/ftrace_make_call for 5809 * correct previous instructions. Making first the NOP 5810 * conversion puts the module to the correct state, thus 5811 * passing the ftrace_make_call check. 5812 * 5813 * We also delay this to after the module code already set the 5814 * text to read-only, as we now need to set it back to read-write 5815 * so that we can modify the text. 5816 */ 5817 if (ftrace_start_up) 5818 ftrace_arch_code_modify_prepare(); 5819 5820 do_for_each_ftrace_rec(pg, rec) { 5821 int cnt; 5822 /* 5823 * do_for_each_ftrace_rec() is a double loop. 5824 * module text shares the pg. If a record is 5825 * not part of this module, then skip this pg, 5826 * which the "break" will do. 5827 */ 5828 if (!within_module_core(rec->ip, mod) && 5829 !within_module_init(rec->ip, mod)) 5830 break; 5831 5832 cnt = 0; 5833 5834 /* 5835 * When adding a module, we need to check if tracers are 5836 * currently enabled and if they are, and can trace this record, 5837 * we need to enable the module functions as well as update the 5838 * reference counts for those function records. 
5839 */ 5840 if (ftrace_start_up) 5841 cnt += referenced_filters(rec); 5842 5843 /* This clears FTRACE_FL_DISABLED */ 5844 rec->flags = cnt; 5845 5846 if (ftrace_start_up && cnt) { 5847 int failed = __ftrace_replace_code(rec, 1); 5848 if (failed) { 5849 ftrace_bug(failed, rec); 5850 goto out_loop; 5851 } 5852 } 5853 5854 } while_for_each_ftrace_rec(); 5855 5856 out_loop: 5857 if (ftrace_start_up) 5858 ftrace_arch_code_modify_post_process(); 5859 5860 out_unlock: 5861 mutex_unlock(&ftrace_lock); 5862 5863 process_cached_mods(mod->name); 5864 } 5865 5866 void ftrace_module_init(struct module *mod) 5867 { 5868 if (ftrace_disabled || !mod->num_ftrace_callsites) 5869 return; 5870 5871 ftrace_process_locs(mod, mod->ftrace_callsites, 5872 mod->ftrace_callsites + mod->num_ftrace_callsites); 5873 } 5874 5875 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, 5876 struct dyn_ftrace *rec) 5877 { 5878 struct ftrace_mod_func *mod_func; 5879 unsigned long symsize; 5880 unsigned long offset; 5881 char str[KSYM_SYMBOL_LEN]; 5882 char *modname; 5883 const char *ret; 5884 5885 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str); 5886 if (!ret) 5887 return; 5888 5889 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL); 5890 if (!mod_func) 5891 return; 5892 5893 mod_func->name = kstrdup(str, GFP_KERNEL); 5894 if (!mod_func->name) { 5895 kfree(mod_func); 5896 return; 5897 } 5898 5899 mod_func->ip = rec->ip - offset; 5900 mod_func->size = symsize; 5901 5902 mod_map->num_funcs++; 5903 5904 list_add_rcu(&mod_func->list, &mod_map->funcs); 5905 } 5906 5907 static struct ftrace_mod_map * 5908 allocate_ftrace_mod_map(struct module *mod, 5909 unsigned long start, unsigned long end) 5910 { 5911 struct ftrace_mod_map *mod_map; 5912 5913 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL); 5914 if (!mod_map) 5915 return NULL; 5916 5917 mod_map->mod = mod; 5918 mod_map->start_addr = start; 5919 mod_map->end_addr = end; 5920 mod_map->num_funcs = 0; 5921 5922 INIT_LIST_HEAD_RCU(&mod_map->funcs); 5923 5924 list_add_rcu(&mod_map->list, &ftrace_mod_maps); 5925 5926 return mod_map; 5927 } 5928 5929 static const char * 5930 ftrace_func_address_lookup(struct ftrace_mod_map *mod_map, 5931 unsigned long addr, unsigned long *size, 5932 unsigned long *off, char *sym) 5933 { 5934 struct ftrace_mod_func *found_func = NULL; 5935 struct ftrace_mod_func *mod_func; 5936 5937 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { 5938 if (addr >= mod_func->ip && 5939 addr < mod_func->ip + mod_func->size) { 5940 found_func = mod_func; 5941 break; 5942 } 5943 } 5944 5945 if (found_func) { 5946 if (size) 5947 *size = found_func->size; 5948 if (off) 5949 *off = addr - found_func->ip; 5950 if (sym) 5951 strlcpy(sym, found_func->name, KSYM_NAME_LEN); 5952 5953 return found_func->name; 5954 } 5955 5956 return NULL; 5957 } 5958 5959 const char * 5960 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, 5961 unsigned long *off, char **modname, char *sym) 5962 { 5963 struct ftrace_mod_map *mod_map; 5964 const char *ret = NULL; 5965 5966 /* mod_map is freed via call_rcu() */ 5967 preempt_disable(); 5968 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { 5969 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); 5970 if (ret) { 5971 if (modname) 5972 *modname = mod_map->mod->name; 5973 break; 5974 } 5975 } 5976 preempt_enable(); 5977 5978 return ret; 5979 } 5980 5981 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, 5982 char *type, char *name, 5983 char *module_name, int 
*exported) 5984 { 5985 struct ftrace_mod_map *mod_map; 5986 struct ftrace_mod_func *mod_func; 5987 5988 preempt_disable(); 5989 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { 5990 5991 if (symnum >= mod_map->num_funcs) { 5992 symnum -= mod_map->num_funcs; 5993 continue; 5994 } 5995 5996 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { 5997 if (symnum > 1) { 5998 symnum--; 5999 continue; 6000 } 6001 6002 *value = mod_func->ip; 6003 *type = 'T'; 6004 strlcpy(name, mod_func->name, KSYM_NAME_LEN); 6005 strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN); 6006 *exported = 1; 6007 preempt_enable(); 6008 return 0; 6009 } 6010 WARN_ON(1); 6011 break; 6012 } 6013 preempt_enable(); 6014 return -ERANGE; 6015 } 6016 6017 #else 6018 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, 6019 struct dyn_ftrace *rec) { } 6020 static inline struct ftrace_mod_map * 6021 allocate_ftrace_mod_map(struct module *mod, 6022 unsigned long start, unsigned long end) 6023 { 6024 return NULL; 6025 } 6026 #endif /* CONFIG_MODULES */ 6027 6028 struct ftrace_init_func { 6029 struct list_head list; 6030 unsigned long ip; 6031 }; 6032 6033 /* Clear any init ips from hashes */ 6034 static void 6035 clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash) 6036 { 6037 struct ftrace_func_entry *entry; 6038 6039 entry = ftrace_lookup_ip(hash, func->ip); 6040 /* 6041 * Do not allow this rec to match again. 6042 * Yeah, it may waste some memory, but will be removed 6043 * if/when the hash is modified again. 6044 */ 6045 if (entry) 6046 entry->ip = 0; 6047 } 6048 6049 static void 6050 clear_func_from_hashes(struct ftrace_init_func *func) 6051 { 6052 struct trace_array *tr; 6053 6054 mutex_lock(&trace_types_lock); 6055 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 6056 if (!tr->ops || !tr->ops->func_hash) 6057 continue; 6058 mutex_lock(&tr->ops->func_hash->regex_lock); 6059 clear_func_from_hash(func, tr->ops->func_hash->filter_hash); 6060 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash); 6061 mutex_unlock(&tr->ops->func_hash->regex_lock); 6062 } 6063 mutex_unlock(&trace_types_lock); 6064 } 6065 6066 static void add_to_clear_hash_list(struct list_head *clear_list, 6067 struct dyn_ftrace *rec) 6068 { 6069 struct ftrace_init_func *func; 6070 6071 func = kmalloc(sizeof(*func), GFP_KERNEL); 6072 if (!func) { 6073 WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n"); 6074 return; 6075 } 6076 6077 func->ip = rec->ip; 6078 list_add(&func->list, clear_list); 6079 } 6080 6081 void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) 6082 { 6083 unsigned long start = (unsigned long)(start_ptr); 6084 unsigned long end = (unsigned long)(end_ptr); 6085 struct ftrace_page **last_pg = &ftrace_pages_start; 6086 struct ftrace_page *pg; 6087 struct dyn_ftrace *rec; 6088 struct dyn_ftrace key; 6089 struct ftrace_mod_map *mod_map = NULL; 6090 struct ftrace_init_func *func, *func_next; 6091 struct list_head clear_hash; 6092 int order; 6093 6094 INIT_LIST_HEAD(&clear_hash); 6095 6096 key.ip = start; 6097 key.flags = end; /* overload flags, as it is unsigned long */ 6098 6099 mutex_lock(&ftrace_lock); 6100 6101 /* 6102 * If we are freeing module init memory, then check if 6103 * any tracer is active. If so, we need to save a mapping of 6104 * the module functions being freed with the address. 
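 * That saved mapping is what lets ftrace_mod_address_lookup() and
 * ftrace_mod_get_kallsym() below keep resolving symbols for addresses
 * that fall in the freed init text.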
6105 */ 6106 if (mod && ftrace_ops_list != &ftrace_list_end) 6107 mod_map = allocate_ftrace_mod_map(mod, start, end); 6108 6109 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { 6110 if (end < pg->records[0].ip || 6111 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) 6112 continue; 6113 again: 6114 rec = bsearch(&key, pg->records, pg->index, 6115 sizeof(struct dyn_ftrace), 6116 ftrace_cmp_recs); 6117 if (!rec) 6118 continue; 6119 6120 /* rec will be cleared from hashes after ftrace_lock unlock */ 6121 add_to_clear_hash_list(&clear_hash, rec); 6122 6123 if (mod_map) 6124 save_ftrace_mod_rec(mod_map, rec); 6125 6126 pg->index--; 6127 ftrace_update_tot_cnt--; 6128 if (!pg->index) { 6129 *last_pg = pg->next; 6130 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 6131 free_pages((unsigned long)pg->records, order); 6132 kfree(pg); 6133 pg = container_of(last_pg, struct ftrace_page, next); 6134 if (!(*last_pg)) 6135 ftrace_pages = pg; 6136 continue; 6137 } 6138 memmove(rec, rec + 1, 6139 (pg->index - (rec - pg->records)) * sizeof(*rec)); 6140 /* More than one function may be in this block */ 6141 goto again; 6142 } 6143 mutex_unlock(&ftrace_lock); 6144 6145 list_for_each_entry_safe(func, func_next, &clear_hash, list) { 6146 clear_func_from_hashes(func); 6147 kfree(func); 6148 } 6149 } 6150 6151 void __init ftrace_free_init_mem(void) 6152 { 6153 void *start = (void *)(&__init_begin); 6154 void *end = (void *)(&__init_end); 6155 6156 ftrace_free_mem(NULL, start, end); 6157 } 6158 6159 void __init ftrace_init(void) 6160 { 6161 extern unsigned long __start_mcount_loc[]; 6162 extern unsigned long __stop_mcount_loc[]; 6163 unsigned long count, flags; 6164 int ret; 6165 6166 local_irq_save(flags); 6167 ret = ftrace_dyn_arch_init(); 6168 local_irq_restore(flags); 6169 if (ret) 6170 goto failed; 6171 6172 count = __stop_mcount_loc - __start_mcount_loc; 6173 if (!count) { 6174 pr_info("ftrace: No functions to be traced?\n"); 6175 goto failed; 6176 } 6177 6178 pr_info("ftrace: allocating %ld entries in %ld pages\n", 6179 count, count / ENTRIES_PER_PAGE + 1); 6180 6181 last_ftrace_enabled = ftrace_enabled = 1; 6182 6183 ret = ftrace_process_locs(NULL, 6184 __start_mcount_loc, 6185 __stop_mcount_loc); 6186 6187 set_ftrace_early_filters(); 6188 6189 return; 6190 failed: 6191 ftrace_disabled = 1; 6192 } 6193 6194 /* Do nothing if arch does not support this */ 6195 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) 6196 { 6197 } 6198 6199 static void ftrace_update_trampoline(struct ftrace_ops *ops) 6200 { 6201 arch_ftrace_update_trampoline(ops); 6202 } 6203 6204 void ftrace_init_trace_array(struct trace_array *tr) 6205 { 6206 INIT_LIST_HEAD(&tr->func_probes); 6207 INIT_LIST_HEAD(&tr->mod_trace); 6208 INIT_LIST_HEAD(&tr->mod_notrace); 6209 } 6210 #else 6211 6212 struct ftrace_ops global_ops = { 6213 .func = ftrace_stub, 6214 .flags = FTRACE_OPS_FL_RECURSION_SAFE | 6215 FTRACE_OPS_FL_INITIALIZED | 6216 FTRACE_OPS_FL_PID, 6217 }; 6218 6219 static int __init ftrace_nodyn_init(void) 6220 { 6221 ftrace_enabled = 1; 6222 return 0; 6223 } 6224 core_initcall(ftrace_nodyn_init); 6225 6226 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; } 6227 static inline void ftrace_startup_enable(int command) { } 6228 static inline void ftrace_startup_all(int command) { } 6229 6230 # define ftrace_startup_sysctl() do { } while (0) 6231 # define ftrace_shutdown_sysctl() do { } while (0) 6232 6233 static void ftrace_update_trampoline(struct ftrace_ops 
*ops) 6234 { 6235 } 6236 6237 #endif /* CONFIG_DYNAMIC_FTRACE */ 6238 6239 __init void ftrace_init_global_array_ops(struct trace_array *tr) 6240 { 6241 tr->ops = &global_ops; 6242 tr->ops->private = tr; 6243 ftrace_init_trace_array(tr); 6244 } 6245 6246 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) 6247 { 6248 /* If we filter on pids, update to use the pid function */ 6249 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { 6250 if (WARN_ON(tr->ops->func != ftrace_stub)) 6251 printk("ftrace ops had %pS for function\n", 6252 tr->ops->func); 6253 } 6254 tr->ops->func = func; 6255 tr->ops->private = tr; 6256 } 6257 6258 void ftrace_reset_array_ops(struct trace_array *tr) 6259 { 6260 tr->ops->func = ftrace_stub; 6261 } 6262 6263 static nokprobe_inline void 6264 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 6265 struct ftrace_ops *ignored, struct pt_regs *regs) 6266 { 6267 struct ftrace_ops *op; 6268 int bit; 6269 6270 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); 6271 if (bit < 0) 6272 return; 6273 6274 /* 6275 * Some of the ops may be dynamically allocated, 6276 * they must be freed after a synchronize_rcu(). 6277 */ 6278 preempt_disable_notrace(); 6279 6280 do_for_each_ftrace_op(op, ftrace_ops_list) { 6281 /* Stub functions don't need to be called nor tested */ 6282 if (op->flags & FTRACE_OPS_FL_STUB) 6283 continue; 6284 /* 6285 * Check the following for each ops before calling their func: 6286 * if RCU flag is set, then rcu_is_watching() must be true 6287 * if PER_CPU is set, then ftrace_function_local_disable() 6288 * must be false 6289 * Otherwise test if the ip matches the ops filter 6290 * 6291 * If any of the above fails then the op->func() is not executed. 6292 */ 6293 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && 6294 ftrace_ops_test(op, ip, regs)) { 6295 if (FTRACE_WARN_ON(!op->func)) { 6296 pr_warn("op=%p %pS\n", op, op); 6297 goto out; 6298 } 6299 op->func(ip, parent_ip, op, regs); 6300 } 6301 } while_for_each_ftrace_op(op); 6302 out: 6303 preempt_enable_notrace(); 6304 trace_clear_recursion(bit); 6305 } 6306 6307 /* 6308 * Some archs only support passing ip and parent_ip. Even though 6309 * the list function ignores the op parameter, we do not want any 6310 * C side effects, where a function is called without the caller 6311 * sending a third parameter. 6312 * Archs are to support both the regs and ftrace_ops at the same time. 6313 * If they support ftrace_ops, it is assumed they support regs. 6314 * If call backs want to use regs, they must either check for regs 6315 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. 6316 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. 6317 * An architecture can pass partial regs with ftrace_ops and still 6318 * set the ARCH_SUPPORTS_FTRACE_OPS. 
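 *
 * For illustration only ("my_callback" is a placeholder), a callback that
 * wants full regs would either depend on CONFIG_DYNAMIC_FTRACE_WITH_REGS
 * or tolerate a NULL regs:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		if (!regs)
 *			return;
 *		...
 *	}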
6319 */ 6320 #if ARCH_SUPPORTS_FTRACE_OPS 6321 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 6322 struct ftrace_ops *op, struct pt_regs *regs) 6323 { 6324 __ftrace_ops_list_func(ip, parent_ip, NULL, regs); 6325 } 6326 NOKPROBE_SYMBOL(ftrace_ops_list_func); 6327 #else 6328 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) 6329 { 6330 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); 6331 } 6332 NOKPROBE_SYMBOL(ftrace_ops_no_ops); 6333 #endif 6334 6335 /* 6336 * If there's only one function registered but it does not support 6337 * recursion, needs RCU protection and/or requires per cpu handling, then 6338 * this function will be called by the mcount trampoline. 6339 */ 6340 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, 6341 struct ftrace_ops *op, struct pt_regs *regs) 6342 { 6343 int bit; 6344 6345 if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching()) 6346 return; 6347 6348 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); 6349 if (bit < 0) 6350 return; 6351 6352 preempt_disable_notrace(); 6353 6354 op->func(ip, parent_ip, op, regs); 6355 6356 preempt_enable_notrace(); 6357 trace_clear_recursion(bit); 6358 } 6359 NOKPROBE_SYMBOL(ftrace_ops_assist_func); 6360 6361 /** 6362 * ftrace_ops_get_func - get the function a trampoline should call 6363 * @ops: the ops to get the function for 6364 * 6365 * Normally the mcount trampoline will call the ops->func, but there 6366 * are times that it should not. For example, if the ops does not 6367 * have its own recursion protection, then it should call the 6368 * ftrace_ops_assist_func() instead. 6369 * 6370 * Returns the function that the trampoline should call for @ops. 6371 */ 6372 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) 6373 { 6374 /* 6375 * If the function does not handle recursion, needs to be RCU safe, 6376 * or does per cpu logic, then we need to call the assist handler. 
static void
ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
				     struct task_struct *prev,
				     struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->function_pids);

	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
		       trace_ignore_this_task(pid_list, next));
}

static void
ftrace_pid_follow_sched_process_fork(void *data,
				     struct task_struct *self,
				     struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->function_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

static void
ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->function_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
						  tr);
		register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
						  tr);
	} else {
		unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
						    tr);
		unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
						    tr);
	}
}
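/*
 * Illustrative sketch (not part of the original file): ftrace_pid_follow_fork()
 * above is driven by the "function-fork" trace option.  When it is enabled,
 * children of the PIDs listed in set_ftrace_pid follow their parent into (and
 * out of) the filter.  A minimal user-space sketch, assuming tracefs is
 * mounted at /sys/kernel/tracing:
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static void follow_forks(void)
{
	int fd = open("/sys/kernel/tracing/options/function-fork", O_WRONLY);

	if (fd >= 0) {
		write(fd, "1", 1);	/* also trace children of filtered pids */
		close(fd);
	}
}
#endif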
static void clear_ftrace_pids(struct trace_array *tr)
{
	struct trace_pid_list *pid_list;
	int cpu;

	pid_list = rcu_dereference_protected(tr->function_pids,
					     lockdep_is_held(&ftrace_lock));
	if (!pid_list)
		return;

	unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);

	for_each_possible_cpu(cpu)
		per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;

	rcu_assign_pointer(tr->function_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	synchronize_rcu();

	trace_free_pid_list(pid_list);
}

void ftrace_clear_pids(struct trace_array *tr)
{
	mutex_lock(&ftrace_lock);

	clear_ftrace_pids(tr);

	mutex_unlock(&ftrace_lock);
}

static void ftrace_pid_reset(struct trace_array *tr)
{
	mutex_lock(&ftrace_lock);
	clear_ftrace_pids(tr);

	ftrace_update_pid_func();
	ftrace_startup_all(0);

	mutex_unlock(&ftrace_lock);
}

/* Greater than any max PID */
#define FTRACE_NO_PIDS		(void *)(PID_MAX_LIMIT + 1)

static void *fpid_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	mutex_lock(&ftrace_lock);
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->function_pids);

	if (!pid_list)
		return !(*pos) ? FTRACE_NO_PIDS : NULL;

	return trace_pid_start(pid_list, pos);
}

static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);

	if (v == FTRACE_NO_PIDS)
		return NULL;

	return trace_pid_next(pid_list, v, pos);
}

static void fpid_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&ftrace_lock);
}

static int fpid_show(struct seq_file *m, void *v)
{
	if (v == FTRACE_NO_PIDS) {
		seq_puts(m, "no pid\n");
		return 0;
	}

	return trace_pid_show(m, v);
}

static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};

static int
ftrace_pid_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_pid_reset(tr);

	ret = seq_open(file, &ftrace_pid_sops);
	if (ret < 0) {
		trace_array_put(tr);
	} else {
		m = file->private_data;
		/* copy tr over to seq ops */
		m->private = tr;
	}

	return ret;
}

static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * ftrace_lock is held.
	 */
	pid_list = rcu_dereference_protected(tr->function_pids,
					     mutex_is_locked(&ftrace_lock));

	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids = NULL;
	struct trace_pid_list *pid_list;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_lock);

	filtered_pids = rcu_dereference_protected(tr->function_pids,
					     lockdep_is_held(&ftrace_lock));

	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(tr->function_pids, pid_list);

	if (filtered_pids) {
		synchronize_rcu();
		trace_free_pid_list(filtered_pids);
	} else if (pid_list) {
		/* Register a probe to set whether to ignore the tracing of a task */
		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
	}

	/*
	 * Ignoring of pids is done at task switch. But we have to
	 * check for those tasks that are currently running.
	 * Always do this in case a pid was appended or removed.
	 */
	on_each_cpu(ignore_task_cpu, tr, 1);

	ftrace_update_pid_func();
	ftrace_startup_all(0);
out:
	mutex_unlock(&ftrace_lock);

	if (ret > 0)
		*ppos += ret;

	return ret;
}
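/*
 * Illustrative sketch (not part of the original file): set_ftrace_pid is the
 * tracefs file backed by ftrace_pid_open()/ftrace_pid_write() above.  A
 * minimal user-space sketch that limits function tracing to one PID, assuming
 * tracefs is mounted at /sys/kernel/tracing:
 */
#if 0
#include <stdio.h>

static int trace_only_pid(int pid)
{
	FILE *f = fopen("/sys/kernel/tracing/set_ftrace_pid", "w");

	if (!f)
		return -1;

	/* Opening with O_TRUNC clears any existing pids; the write adds one. */
	fprintf(f, "%d\n", pid);
	fclose(f);
	return 0;
}
#endif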
static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return seq_release(inode, file);
}

static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
	.llseek		= tracing_lseek,
	.release	= ftrace_pid_release,
};

void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			    tr, &ftrace_pid_fops);
}

void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
					 struct dentry *d_tracer)
{
	/* Only the top level directory has the dyn_tracefs and profile */
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));

	ftrace_init_dyn_tracefs(d_tracer);
	ftrace_profile_tracefs(d_tracer);
}

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	ftrace_trace_function = ftrace_stub;
}

/**
 * ftrace_is_dead - Test if ftrace is dead or not.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret = -1;

	ftrace_ops_init(ops);

	mutex_lock(&ftrace_lock);

	ret = ftrace_startup(ops, 0);

	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);
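/*
 * Illustrative sketch (not part of the original file): minimal module-style
 * use of register_ftrace_function()/unregister_ftrace_function().  The names
 * my_tracer_* are hypothetical; the filter call assumes CONFIG_DYNAMIC_FTRACE.
 * The callback is marked notrace, as the comment above requires.
 */
#if 0
static void notrace my_tracer_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs)
{
	/* Runs at every traced function entry; keep this path short. */
}

static struct ftrace_ops my_tracer_ops = {
	.func = my_tracer_func,
};

static int __init my_tracer_init(void)
{
	/* Optionally narrow the ops to a single function before registering. */
	ftrace_set_filter(&my_tracer_ops, "vfs_read", strlen("vfs_read"), 1);

	return register_ftrace_function(&my_tracer_ops);
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_function(&my_tracer_ops);
}
#endif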
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		/* we are starting ftrace again */
		if (rcu_dereference_protected(ftrace_ops_list,
			lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
			update_ftrace_function();

		ftrace_startup_sysctl();

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
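/*
 * Illustrative sketch (not part of the original file): ftrace_enable_sysctl()
 * above backs the kernel.ftrace_enabled sysctl.  A user-space sketch that
 * turns function tracing callbacks off system-wide:
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static void disable_ftrace_callbacks(void)
{
	int fd = open("/proc/sys/kernel/ftrace_enabled", O_WRONLY);

	if (fd >= 0) {
		write(fd, "0", 1);	/* send all callers to ftrace_stub */
		close(fd);
	}
}
#endif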