/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_ops_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this
 * list are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_ops_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)					\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))
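/*
 * Usage sketch (illustration only, not part of the build): walking the
 * registered ops with the two helpers above. This is the same idiom used
 * by ftrace_ops_list_func() and friends further down in this file.
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 */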
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	mutex_lock(&ftrace_lock);

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next)
		cnt++;

	mutex_unlock(&ftrace_lock);

	return cnt;
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	struct trace_array *tr = op->private;

	if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
		return;

	op->saved_func(ip, parent_ip, op, regs);
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the time
 * the change is visible on all CPUs.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static void per_cpu_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int per_cpu_ops_alloc(struct ftrace_ops *ops)
{
	int __percpu *disabled;

	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
		return -EINVAL;

	disabled = alloc_percpu(int);
	if (!disabled)
		return -ENOMEM;

	ops->disabled = disabled;
	per_cpu_ops_disable_all(ops);
	return 0;
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);

/* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;
static bool fgraph_graph_time = true;

#else
static inline void update_function_graph_func(void) { }
#endif


static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU |
			  FTRACE_OPS_FL_RCU) || FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = ftrace_ops_list;

	/* If there's no ftrace_ops registered, just call the stub function */
	if (ftrace_ops_list == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (ftrace_ops_list->next == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}
int using_ftrace_ops_list_func(void)
{
	return ftrace_trace_function == ftrace_ops_list_func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;
	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_PER_CPU) {
		if (per_cpu_ops_alloc(ops))
			return -ENOMEM;
	}

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}
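/*
 * Registration sketch (hypothetical module code, not part of this file):
 * a minimal ftrace_ops that counts every traced call. The callback
 * signature matches ftrace_func_t as used throughout this file; the
 * callback and counter names are made up for illustration. It would be
 * registered with register_ftrace_function(&my_ops) and torn down with
 * unregister_ftrace_function(&my_ops), which funnel into the two
 * functions above under ftrace_lock.
 *
 *	static unsigned long my_hits;
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		my_hits++;
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 */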
static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
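/*
 * Worked example (illustration; exact numbers depend on arch and config):
 * on a 64-bit build with 4K pages and CONFIG_FUNCTION_GRAPH_TRACER=y,
 * struct ftrace_profile is 48 bytes (16 for the hlist_node, 8 each for
 * ip, counter, time and time_squared) and the page header (next + index)
 * is 16 bytes, so PROFILE_RECORDS_SIZE = 4096 - 16 = 4080 and
 * PROFILES_PER_PAGE = 4080 / 48 = 85 records per page.
 */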
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* without function graph, compare on hit counts */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		    "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		    "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = rec->time;
	do_div(avg, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample variance (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply the standard computational formula for
		 * the sample variance (not Welford's online method):
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only by 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide by 1000 again.
		 */
		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}
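/*
 * Note on the variance computed above (added for clarity): with
 * n = rec->counter, Sum x = rec->time and Sum x^2 = rec->time_squared,
 * the usual sample variance
 *
 *	s^2 = (Sum x^2 - n * mean^2) / (n - 1),   mean = Sum x / n
 *
 * multiplies out to (n * Sum x^2 - (Sum x)^2) / (n * (n - 1)), which is
 * exactly the integer expression used: the numerator is built first, then
 * a single do_div() applies the n*(n-1) divisor (times 1000 for the
 * ns^2 -> us^2 step).
 */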
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. From past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	int index = trace->depth;

	function_profile_call(trace->func, 0, NULL, NULL);

	if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
		current->ret_stack[index].subtime = 0;

	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd, ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}
static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
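/*
 * How this is driven from userspace (sketch; the tracefs mount point
 * varies, typically /sys/kernel/tracing or /sys/kernel/debug/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	cat /sys/kernel/tracing/trace_stat/function0     (per-cpu output)
 *	echo 0 > /sys/kernel/tracing/function_profile_enabled
 *
 * Writing the file lands in ftrace_profile_write() below, which calls
 * whichever register_ftrace_profiler() variant was built above.
 */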
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent; if something
			 * goes wrong, we still do not free this memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = tracefs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
#else
# define ftrace_graph_active 0
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct list_head	free_list;
};

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	struct ftrace_ops *op;
	bool ret = false;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_sched().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				ret = true;
				goto out;
			}
	} while_for_each_ftrace_op(op);

 out:
	preempt_enable_notrace();

	return ret;
}
struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	if (ftrace_hash_empty(hash))
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	if (hash->size_bits)
		key = hash_long(entry->ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}
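/*
 * Usage sketch (illustration only, assuming a dyn_ftrace record "rec"):
 * build a hash with alloc_ftrace_hash() (defined below), add a call-site
 * address, then look it up. Lookups key on the exact ip of the
 * mcount/fentry call site.
 *
 *	struct ftrace_hash *hash;
 *
 *	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
 *	if (hash && !add_hash_entry(hash, rec->ip))
 *		WARN_ON(!ftrace_lookup_ip(hash, rec->ip));
 *	free_ftrace_hash(hash);
 */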
static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int ret;
	int i;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	/*
	 * If the new source is empty, just free dst and assign it
	 * the empty_hash.
	 */
	if (!src->count) {
		new_hash = EMPTY_HASH;
		goto update;
	}

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return -ENOMEM;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

 update:
	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}
static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !ftrace_lookup_ip(hash->notrace_hash, ip));
}

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 * AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}
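/*
 * The filter/notrace semantics above, spelled out (added for clarity):
 *
 *	filter_hash	notrace_hash	traced?
 *	-----------	------------	-------
 *	empty		empty		yes (all functions)
 *	empty		has ip		no
 *	has ip		empty		yes
 *	has ip		has ip		no
 *	lacks ip	(anything)	no
 */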
/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}


static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	/* The key's flags field is overloaded to hold the end of the range */
	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}

/**
 * ftrace_location - return true if the ip given is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}
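/*
 * Usage note (illustration; kprobes is a real in-tree user of this
 * interface): code that patches or probes kernel text can ask ftrace
 * whether an address is an mcount/fentry call site before touching it:
 *
 *	if (ftrace_location(addr))
 *		... the nop/call at addr is owned by ftrace ...
 *
 * ftrace_text_reserved() answers the same question for a whole range.
 */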
/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = 1;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * If the rec had TRAMP enabled, then it needs to
			 * be cleared. TRAMP can only be enabled if
			 * there is only a single ops attached to it.
			 * In other words, always disable it on decrementing.
			 * In the future, we may set it if rec count is
			 * decremented to one, and the ops that is left
			 * has a trampoline.
			 */
			rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}
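/*
 * Note on rec->flags (added for clarity): the low bits of dyn_ftrace.flags
 * hold a reference count of how many registered ftrace_ops trace this
 * function (read via ftrace_rec_count(), capped at FTRACE_REF_MAX), while
 * the high bits hold the FTRACE_FL_* state bits used above. That is why
 * the increment and decrement above are a plain rec->flags++ and
 * rec->flags--.
 */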
static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}
/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no update is needed, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the following meanings:
 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 * - If the hash is EMPTY_HASH, it hits nothing
 * - Anything else hits the recs which match the hash entries.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return 0;

	/*
	 * Since the IPMODIFY is a very address sensitive action, we do not
	 * allow ftrace_ops to set all functions to new hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {
		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			/* New entries must ensure no others are using it */
			if (rec->flags & FTRACE_FL_IPMODIFY)
				goto rollback;
			rec->flags |= FTRACE_FL_IPMODIFY;
		} else /* Removed entry */
			rec->flags &= ~FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {
		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}

static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}

static void print_ip_ins(const char *fmt, const unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

enum ftrace_bug_type ftrace_bug_type;
const void *ftrace_expected;

static void print_bug_type(void)
{
	switch (ftrace_bug_type) {
	case FTRACE_BUG_UNKNOWN:
		break;
	case FTRACE_BUG_INIT:
		pr_info("Initializing ftrace call sites\n");
		break;
	case FTRACE_BUG_NOP:
		pr_info("Setting ftrace call site to NOP\n");
		break;
	case FTRACE_BUG_CALL:
		pr_info("Setting ftrace call site to call ftrace function\n");
		break;
	case FTRACE_BUG_UPDATE:
		pr_info("Updating ftrace call site to call a different ftrace function\n");
		break;
	}
}
/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @rec: The record that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
	unsigned long ip = rec ? rec->ip : 0;

	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual:   ", (unsigned char *)ip);
		pr_cont("\n");
		if (ftrace_expected) {
			print_ip_ins(" expected: ", ftrace_expected);
			pr_cont("\n");
		}
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
	print_bug_type();
	if (rec) {
		struct ftrace_ops *ops = NULL;

		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops) {
				do {
					pr_cont("\ttramp: %pS (%pS)",
						(void *)ops->trampoline,
						(void *)ops->func);
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
				pr_cont("\ttramp: ERROR!");

		}
		ip = ftrace_get_addr_curr(rec);
		pr_cont("\n expected tramp: %lx\n", ip);
	}
}
static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
	unsigned long flag = 0UL;

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	if (rec->flags & FTRACE_FL_DISABLED)
		return FTRACE_UPDATE_IGNORE;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure it's disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && ftrace_rec_count(rec))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
	 */
	if (flag) {
		if (!(rec->flags & FTRACE_FL_REGS) !=
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) !=
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
			rec->flags |= FTRACE_FL_ENABLED;
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
			if (flag & FTRACE_FL_TRAMP) {
				if (rec->flags & FTRACE_FL_TRAMP)
					rec->flags |= FTRACE_FL_TRAMP_EN;
				else
					rec->flags &= ~FTRACE_FL_TRAMP_EN;
			}
		}

		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
		 *   from the save regs, to a non-save regs function or
		 *   vice versa, or from a trampoline call.
		 */
		if (flag & FTRACE_FL_ENABLED) {
			ftrace_bug_type = FTRACE_BUG_CALL;
			return FTRACE_UPDATE_MAKE_CALL;
		}

		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
		/* If there are no more users, clear all flags */
		if (!ftrace_rec_count(rec))
			rec->flags = 0;
		else
			/*
			 * Just disable the record, but keep the ops TRAMP
			 * and REGS states. The _EN flags must be disabled though.
			 */
			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
					FTRACE_FL_REGS_EN);
	}

	ftrace_bug_type = FTRACE_BUG_NOP;
	return FTRACE_UPDATE_MAKE_NOP;
}

/**
 * ftrace_update_record - set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 1);
}
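/*
 * Sketch of how arch code consumes the result (illustration only; the
 * real consumer is __ftrace_replace_code() further down in ftrace.c):
 *
 *	switch (ftrace_update_record(rec, enable)) {
 *	case FTRACE_UPDATE_IGNORE:
 *		nothing to patch, return
 *	case FTRACE_UPDATE_MAKE_CALL:
 *		patch the call site from a nop to a call
 *	case FTRACE_UPDATE_MAKE_NOP:
 *		patch the call site from a call back to a nop
 *	case FTRACE_UPDATE_MODIFY_CALL:
 *		retarget an existing call (regs or trampoline change)
 *	}
 */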
2163 */ 2164 int ftrace_test_record(struct dyn_ftrace *rec, int enable) 2165 { 2166 return ftrace_check_record(rec, enable, 0); 2167 } 2168 2169 static struct ftrace_ops * 2170 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec) 2171 { 2172 struct ftrace_ops *op; 2173 unsigned long ip = rec->ip; 2174 2175 do_for_each_ftrace_op(op, ftrace_ops_list) { 2176 2177 if (!op->trampoline) 2178 continue; 2179 2180 if (hash_contains_ip(ip, op->func_hash)) 2181 return op; 2182 } while_for_each_ftrace_op(op); 2183 2184 return NULL; 2185 } 2186 2187 static struct ftrace_ops * 2188 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, 2189 struct ftrace_ops *op) 2190 { 2191 unsigned long ip = rec->ip; 2192 2193 while_for_each_ftrace_op(op) { 2194 2195 if (!op->trampoline) 2196 continue; 2197 2198 if (hash_contains_ip(ip, op->func_hash)) 2199 return op; 2200 } 2201 2202 return NULL; 2203 } 2204 2205 static struct ftrace_ops * 2206 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) 2207 { 2208 struct ftrace_ops *op; 2209 unsigned long ip = rec->ip; 2210 2211 /* 2212 * Need to check removed ops first. 2213 * If they are being removed, and this rec has a tramp, 2214 * and this rec is in the ops list, then it would be the 2215 * one with the tramp. 2216 */ 2217 if (removed_ops) { 2218 if (hash_contains_ip(ip, &removed_ops->old_hash)) 2219 return removed_ops; 2220 } 2221 2222 /* 2223 * Need to find the current trampoline for a rec. 2224 * Now, a trampoline is only attached to a rec if there 2225 * was a single 'ops' attached to it. But this can be called 2226 * when we are adding another op to the rec or removing the 2227 * current one. Thus, if the op is being added, we can 2228 * ignore it because it hasn't attached itself to the rec 2229 * yet. 2230 * 2231 * If an ops is being modified (hooking to different functions) 2232 * then we don't care about the new functions that are being 2233 * added, just the old ones (that are probably being removed). 2234 * 2235 * If we are adding an ops to a function that already is using 2236 * a trampoline, it needs to be removed (trampolines are only 2237 * for single ops connected), then an ops that is not being 2238 * modified also needs to be checked. 2239 */ 2240 do_for_each_ftrace_op(op, ftrace_ops_list) { 2241 2242 if (!op->trampoline) 2243 continue; 2244 2245 /* 2246 * If the ops is being added, it hasn't gotten to 2247 * the point to be removed from this tree yet. 2248 */ 2249 if (op->flags & FTRACE_OPS_FL_ADDING) 2250 continue; 2251 2252 2253 /* 2254 * If the ops is being modified and is in the old 2255 * hash, then it is probably being removed from this 2256 * function. 2257 */ 2258 if ((op->flags & FTRACE_OPS_FL_MODIFYING) && 2259 hash_contains_ip(ip, &op->old_hash)) 2260 return op; 2261 /* 2262 * If the ops is not being added or modified, and it's 2263 * in its normal filter hash, then this must be the one 2264 * we want! 
	 */
		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, op->func_hash))
			return op;

	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

/**
 * ftrace_get_addr_new - Get the call address to set to
 * @rec:  The ftrace record descriptor
 *
 * If the record has the FTRACE_FL_REGS flag set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not set, then it wants to convert to the normal callback.
 *
 * Returns the address of the trampoline to set to
 */
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP) {
		ops = ftrace_find_tramp_ops_new(rec);
		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
				(void *)rec->ip, (void *)rec->ip, rec->flags);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}
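
/*
 * A minimal sketch of how the address pair is consumed (this is what
 * __ftrace_replace_code() further below does; shown here only for
 * illustration):
 *
 *	ftrace_addr = ftrace_get_addr_new(rec);       what to call next
 *	ftrace_old_addr = ftrace_get_addr_curr(rec);  what is called now
 *	ret = ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
 *
 * The current address must be read before the record is updated,
 * since updating changes the _EN flags that ftrace_get_addr_curr()
 * (defined just below) tests.
 */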
/**
 * ftrace_get_addr_curr - Get the call address that is already there
 * @rec:  The ftrace record descriptor
 *
 * The FTRACE_FL_REGS_EN is set when the record already points to
 * a function that saves all the regs. Basically the '_EN' version
 * represents the current state of the function.
 *
 * Returns the address of the trampoline that is currently being called
 */
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP_EN) {
		ops = ftrace_find_tramp_ops_curr(rec);
		if (FTRACE_WARN_ON(!ops)) {
			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
				(void *)rec->ip, (void *)rec->ip);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS_EN)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_old_addr;
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = ftrace_get_addr_new(rec);

	/* This needs to be done before we call ftrace_update_record */
	ftrace_old_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_update_record(rec, enable);

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		ftrace_bug_type = FTRACE_BUG_CALL;
		return ftrace_make_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		ftrace_bug_type = FTRACE_BUG_NOP;
		return ftrace_make_nop(NULL, rec, ftrace_old_addr);

	case FTRACE_UPDATE_MODIFY_CALL:
		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
	}

	return -1; /* unknown ftrace bug */
}

void __weak ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	if (unlikely(ftrace_disabled))
		return;

	do_for_each_ftrace_rec(pg, rec) {
		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			ftrace_bug(failed, rec);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}

struct ftrace_rec_iter {
	struct ftrace_page	*pg;
	int			index;
};

/**
 * ftrace_rec_iter_start - start up iterating over traced functions
 *
 * Returns an iterator handle that is used to iterate over all
 * the records that represent address locations where functions
 * are traced.
 *
 * May return NULL if no records are available.
 */
struct ftrace_rec_iter *ftrace_rec_iter_start(void)
{
	/*
	 * We only use a single iterator.
	 * Protected by the ftrace_lock mutex.
	 */
	static struct ftrace_rec_iter ftrace_rec_iter;
	struct ftrace_rec_iter *iter = &ftrace_rec_iter;

	iter->pg = ftrace_pages_start;
	iter->index = 0;

	/* Could have empty pages */
	while (iter->pg && !iter->pg->index)
		iter->pg = iter->pg->next;

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_next - get the next record to process.
 * @iter: The handle to the iterator.
 *
 * Returns the next iterator after the given iterator @iter.
 */
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
{
	iter->index++;

	if (iter->index >= iter->pg->index) {
		iter->pg = iter->pg->next;
		iter->index = 0;

		/* Could have empty pages */
		while (iter->pg && !iter->pg->index)
			iter->pg = iter->pg->next;
	}

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_record - get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}
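
/*
 * A typical use of the iterator API above, as arch code might walk
 * every patchable call site (a sketch; the loop body is hypothetical):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... patch the instruction at rec->ip ...
 *	}
 *
 * The caller must hold ftrace_lock, since the iterator is a single
 * shared instance.
 */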
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return 0;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug_type = FTRACE_BUG_INIT;
		ftrace_bug(ret, rec);
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

void ftrace_modify_all_code(int command)
{
	int update = command & FTRACE_UPDATE_TRACE_FUNC;
	int err = 0;

	/*
	 * If the ftrace_caller calls an ftrace_ops func directly,
	 * we need to make sure that it only traces functions it
	 * expects to trace. When doing the switch of functions,
	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls are set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops have the right functions traced.
	 */
	if (update) {
		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (update && ftrace_trace_function != ftrace_ops_list_func) {
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
		err = ftrace_update_ftrace_func(ftrace_trace_function);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_START_FUNC_RET)
		err = ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		err = ftrace_disable_ftrace_graph_caller();
	FTRACE_WARN_ON(err);
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}
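
/*
 * For illustration only (a sketch, not code used here): a full update
 * that also switches the traced function passes a command mask such as
 *
 *	int command = FTRACE_UPDATE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
 *
 *	ftrace_run_update_code(command);
 *
 * which funnels through arch_ftrace_update_code() and, by default,
 * stop_machine(__ftrace_modify_code, &command, NULL) below.
 */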
/**
 * ftrace_run_stop_machine - go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, then
 * it can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

/**
 * arch_ftrace_update_code - modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if they do not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do whatever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
				   struct ftrace_ops_hash *old_hash)
{
	ops->flags |= FTRACE_OPS_FL_MODIFYING;
	ops->old_hash.filter_hash = old_hash->filter_hash;
	ops->old_hash.notrace_hash = old_hash->notrace_hash;
	ftrace_run_update_code(command);
	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;
	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
}

static void per_cpu_ops_free(struct ftrace_ops *ops)
{
	free_percpu(ops->disabled);
}

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_all(int command)
{
	update_all_ops = true;
	ftrace_startup_enable(command);
	update_all_ops = false;
}

static int ftrace_startup(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up++;

	/*
	 * Note that ftrace probes use this to start up
	 * and modify functions they will probe. But we still
	 * set the ADDING flag for modification, as probes
	 * do not have trampolines. If they add them in the
	 * future, then the probes will need to distinguish
	 * between adding and updating probes.
	 */
	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;

	ret = ftrace_hash_ipmodify_enable(ops);
	if (ret < 0) {
		/* Rollback registration process */
		__unregister_ftrace_function(ops);
		ftrace_start_up--;
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
		return ret;
	}

	if (ftrace_hash_rec_enable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;

	ftrace_startup_enable(command);

	ops->flags &= ~FTRACE_OPS_FL_ADDING;

	return 0;
}
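
/*
 * How the above is normally reached (a sketch; my_func and my_ops are
 * hypothetical): a tracer registers an ftrace_ops, and the register
 * path ends up calling ftrace_startup(ops, 0):
 *
 *	static void my_func(unsigned long ip, unsigned long parent_ip,
 *			    struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		... called for every traced function ...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_func,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */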
static int ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __unregister_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may never be nopped again
	 * after further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	/* Disabling ipmodify never fails */
	ftrace_hash_ipmodify_disable(ops);

	if (ftrace_hash_rec_disable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;

	ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled) {
		/*
		 * If these are per_cpu ops, they still need their
		 * per_cpu field freed. Since function tracing is
		 * not currently active, we can just free them
		 * without synchronizing all CPUs.
		 */
		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
			per_cpu_ops_free(ops);
		return 0;
	}

	/*
	 * If the ops uses a trampoline, then it needs to be
	 * tested first on update.
	 */
	ops->flags |= FTRACE_OPS_FL_REMOVING;
	removed_ops = ops;

	/* The trampoline logic checks the old hashes */
	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;

	ftrace_run_update_code(command);

	/*
	 * If there are no more ops registered with ftrace, run a
	 * sanity check to make sure all rec flags are cleared.
	 */
	if (ftrace_ops_list == &ftrace_list_end) {
		struct ftrace_page *pg;
		struct dyn_ftrace *rec;

		do_for_each_ftrace_rec(pg, rec) {
			if (FTRACE_WARN_ON_ONCE(rec->flags))
				pr_warn(" %pS flags:%lx\n",
					(void *)rec->ip, rec->flags);
		} while_for_each_ftrace_rec();
	}

	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;

	removed_ops = NULL;
	ops->flags &= ~FTRACE_OPS_FL_REMOVING;

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 * The same goes for freeing the per_cpu data of the per_cpu
	 * ops.
	 *
	 * Again, normal synchronize_sched() is not good enough.
	 * We need to do a hard force of sched synchronization.
	 * This is because we use preempt_disable() to do RCU, but
	 * the function tracers can be called where RCU is not watching
	 * (like before user_exit()). We can not rely on the RCU
	 * infrastructure to do the synchronization, thus we must do it
	 * ourselves.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
		schedule_on_each_cpu(ftrace_sync);

		arch_ftrace_trampoline_free(ops);

		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
			per_cpu_ops_free(ops);
	}

	return 0;
}
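
/*
 * The mirror image of the registration sketch above (illustrative
 * only, reusing the hypothetical my_ops): unregistering funnels into
 * ftrace_shutdown(ops, 0), and for dynamic or per_cpu ops the hard
 * sched synchronization above has completed by the time it returns:
 *
 *	unregister_ftrace_function(&my_ops);
 *	... only now may my_ops and its per_cpu data be reused ...
 */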
static void ftrace_startup_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up) {
		command = FTRACE_UPDATE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_START_FUNC_RET;
		ftrace_startup_enable(command);
	}
}

static void ftrace_shutdown_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up) {
		command = FTRACE_DISABLE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_STOP_FUNC_RET;
		ftrace_run_update_code(command);
	}
}
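
/*
 * The two sysctl helpers above back the kernel.ftrace_enabled switch;
 * for example, from user space (illustrative):
 *
 *	# echo 0 > /proc/sys/kernel/ftrace_enabled   ends up in ftrace_shutdown_sysctl()
 *	# echo 1 > /proc/sys/kernel/ftrace_enabled   ends up in ftrace_startup_sysctl()
 */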
static cycle_t ftrace_update_time;
unsigned long ftrace_update_tot_cnt;

static inline int ops_traces_mod(struct ftrace_ops *ops)
{
	/*
	 * An empty filter_hash defaults to tracing the module.
	 * But the notrace_hash requires a test of individual
	 * module functions.
	 */
	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
		ftrace_hash_empty(ops->func_hash->notrace_hash);
}

/*
 * Check if the current ops references the record.
 *
 * If the ops traces all functions, then it was already accounted for.
 * If the ops does not trace the current record function, skip it.
 * If the ops ignores the function via notrace filter, skip it.
 */
static inline bool
ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	/* If ops isn't enabled, ignore it */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/* If ops traces all functions, then it includes this function */
	if (ops_traces_mod(ops))
		return true;

	/* The function must be in the filter */
	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
	    !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
		return false;

	/* If in notrace hash, we ignore it too */
	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
		return false;

	return true;
}

static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *p;
	cycle_t start, stop;
	unsigned long update_cnt = 0;
	unsigned long rec_flags = 0;
	int i;

	start = ftrace_now(raw_smp_processor_id());

	/*
	 * When a module is loaded, this function is called to convert
	 * the calls to mcount in its text to nops, and also to create
	 * an entry in the ftrace data. Now, if ftrace is activated
	 * after this call, but before the module sets its text to
	 * read-only, the modification of enabling ftrace can fail if
	 * the read-only is done while ftrace is converting the calls.
	 * To prevent this, the module's records are set as disabled
	 * and will be enabled after the call to set the module's text
	 * to read-only.
	 */
	if (mod)
		rec_flags |= FTRACE_FL_DISABLED;

	for (pg = new_pgs; pg; pg = pg->next) {

		for (i = 0; i < pg->index; i++) {

			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;

			p = &pg->records[i];
			p->flags = rec_flags;

			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (!ftrace_code_disable(mod, p))
				break;

			update_cnt++;
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += update_cnt;

	return 0;
}

static int ftrace_allocate_records(struct ftrace_page *pg, int count)
{
	int order;
	int cnt;

	if (WARN_ON(!count))
		return -EINVAL;

	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));

	/*
	 * We want to fill as much as possible. No more than a page
	 * may be empty.
	 */
	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
		order--;

 again:
	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
		if (!order)
			return -ENOMEM;
		order >>= 1;
		goto again;
	}

	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
	pg->size = cnt;

	if (cnt > count)
		cnt = count;

	return cnt;
}

static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	int order;
	int cnt;

	if (!num_to_init)
		return NULL;

	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return NULL;

	/*
	 * Try to allocate as much as possible in one contiguous
	 * location that fills in all of the space. We want to
	 * waste as little space as possible.
2991 */ 2992 for (;;) { 2993 cnt = ftrace_allocate_records(pg, num_to_init); 2994 if (cnt < 0) 2995 goto free_pages; 2996 2997 num_to_init -= cnt; 2998 if (!num_to_init) 2999 break; 3000 3001 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); 3002 if (!pg->next) 3003 goto free_pages; 3004 3005 pg = pg->next; 3006 } 3007 3008 return start_pg; 3009 3010 free_pages: 3011 pg = start_pg; 3012 while (pg) { 3013 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 3014 free_pages((unsigned long)pg->records, order); 3015 start_pg = pg->next; 3016 kfree(pg); 3017 pg = start_pg; 3018 } 3019 pr_info("ftrace: FAILED to allocate memory for functions\n"); 3020 return NULL; 3021 } 3022 3023 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ 3024 3025 struct ftrace_iterator { 3026 loff_t pos; 3027 loff_t func_pos; 3028 struct ftrace_page *pg; 3029 struct dyn_ftrace *func; 3030 struct ftrace_func_probe *probe; 3031 struct trace_parser parser; 3032 struct ftrace_hash *hash; 3033 struct ftrace_ops *ops; 3034 int hidx; 3035 int idx; 3036 unsigned flags; 3037 }; 3038 3039 static void * 3040 t_hash_next(struct seq_file *m, loff_t *pos) 3041 { 3042 struct ftrace_iterator *iter = m->private; 3043 struct hlist_node *hnd = NULL; 3044 struct hlist_head *hhd; 3045 3046 (*pos)++; 3047 iter->pos = *pos; 3048 3049 if (iter->probe) 3050 hnd = &iter->probe->node; 3051 retry: 3052 if (iter->hidx >= FTRACE_FUNC_HASHSIZE) 3053 return NULL; 3054 3055 hhd = &ftrace_func_hash[iter->hidx]; 3056 3057 if (hlist_empty(hhd)) { 3058 iter->hidx++; 3059 hnd = NULL; 3060 goto retry; 3061 } 3062 3063 if (!hnd) 3064 hnd = hhd->first; 3065 else { 3066 hnd = hnd->next; 3067 if (!hnd) { 3068 iter->hidx++; 3069 goto retry; 3070 } 3071 } 3072 3073 if (WARN_ON_ONCE(!hnd)) 3074 return NULL; 3075 3076 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node); 3077 3078 return iter; 3079 } 3080 3081 static void *t_hash_start(struct seq_file *m, loff_t *pos) 3082 { 3083 struct ftrace_iterator *iter = m->private; 3084 void *p = NULL; 3085 loff_t l; 3086 3087 if (!(iter->flags & FTRACE_ITER_DO_HASH)) 3088 return NULL; 3089 3090 if (iter->func_pos > *pos) 3091 return NULL; 3092 3093 iter->hidx = 0; 3094 for (l = 0; l <= (*pos - iter->func_pos); ) { 3095 p = t_hash_next(m, &l); 3096 if (!p) 3097 break; 3098 } 3099 if (!p) 3100 return NULL; 3101 3102 /* Only set this if we have an item */ 3103 iter->flags |= FTRACE_ITER_HASH; 3104 3105 return iter; 3106 } 3107 3108 static int 3109 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter) 3110 { 3111 struct ftrace_func_probe *rec; 3112 3113 rec = iter->probe; 3114 if (WARN_ON_ONCE(!rec)) 3115 return -EIO; 3116 3117 if (rec->ops->print) 3118 return rec->ops->print(m, rec->ip, rec->ops, rec->data); 3119 3120 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func); 3121 3122 if (rec->data) 3123 seq_printf(m, ":%p", rec->data); 3124 seq_putc(m, '\n'); 3125 3126 return 0; 3127 } 3128 3129 static void * 3130 t_next(struct seq_file *m, void *v, loff_t *pos) 3131 { 3132 struct ftrace_iterator *iter = m->private; 3133 struct ftrace_ops *ops = iter->ops; 3134 struct dyn_ftrace *rec = NULL; 3135 3136 if (unlikely(ftrace_disabled)) 3137 return NULL; 3138 3139 if (iter->flags & FTRACE_ITER_HASH) 3140 return t_hash_next(m, pos); 3141 3142 (*pos)++; 3143 iter->pos = iter->func_pos = *pos; 3144 3145 if (iter->flags & FTRACE_ITER_PRINTALL) 3146 return t_hash_start(m, pos); 3147 3148 retry: 3149 if (iter->idx >= iter->pg->index) { 3150 if (iter->pg->next) { 3151 iter->pg = iter->pg->next; 
3152 iter->idx = 0; 3153 goto retry; 3154 } 3155 } else { 3156 rec = &iter->pg->records[iter->idx++]; 3157 if (((iter->flags & FTRACE_ITER_FILTER) && 3158 !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) || 3159 3160 ((iter->flags & FTRACE_ITER_NOTRACE) && 3161 !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) || 3162 3163 ((iter->flags & FTRACE_ITER_ENABLED) && 3164 !(rec->flags & FTRACE_FL_ENABLED))) { 3165 3166 rec = NULL; 3167 goto retry; 3168 } 3169 } 3170 3171 if (!rec) 3172 return t_hash_start(m, pos); 3173 3174 iter->func = rec; 3175 3176 return iter; 3177 } 3178 3179 static void reset_iter_read(struct ftrace_iterator *iter) 3180 { 3181 iter->pos = 0; 3182 iter->func_pos = 0; 3183 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH); 3184 } 3185 3186 static void *t_start(struct seq_file *m, loff_t *pos) 3187 { 3188 struct ftrace_iterator *iter = m->private; 3189 struct ftrace_ops *ops = iter->ops; 3190 void *p = NULL; 3191 loff_t l; 3192 3193 mutex_lock(&ftrace_lock); 3194 3195 if (unlikely(ftrace_disabled)) 3196 return NULL; 3197 3198 /* 3199 * If an lseek was done, then reset and start from beginning. 3200 */ 3201 if (*pos < iter->pos) 3202 reset_iter_read(iter); 3203 3204 /* 3205 * For set_ftrace_filter reading, if we have the filter 3206 * off, we can short cut and just print out that all 3207 * functions are enabled. 3208 */ 3209 if ((iter->flags & FTRACE_ITER_FILTER && 3210 ftrace_hash_empty(ops->func_hash->filter_hash)) || 3211 (iter->flags & FTRACE_ITER_NOTRACE && 3212 ftrace_hash_empty(ops->func_hash->notrace_hash))) { 3213 if (*pos > 0) 3214 return t_hash_start(m, pos); 3215 iter->flags |= FTRACE_ITER_PRINTALL; 3216 /* reset in case of seek/pread */ 3217 iter->flags &= ~FTRACE_ITER_HASH; 3218 return iter; 3219 } 3220 3221 if (iter->flags & FTRACE_ITER_HASH) 3222 return t_hash_start(m, pos); 3223 3224 /* 3225 * Unfortunately, we need to restart at ftrace_pages_start 3226 * every time we let go of the ftrace_mutex. This is because 3227 * those pointers can change without the lock. 3228 */ 3229 iter->pg = ftrace_pages_start; 3230 iter->idx = 0; 3231 for (l = 0; l <= *pos; ) { 3232 p = t_next(m, p, &l); 3233 if (!p) 3234 break; 3235 } 3236 3237 if (!p) 3238 return t_hash_start(m, pos); 3239 3240 return iter; 3241 } 3242 3243 static void t_stop(struct seq_file *m, void *p) 3244 { 3245 mutex_unlock(&ftrace_lock); 3246 } 3247 3248 void * __weak 3249 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) 3250 { 3251 return NULL; 3252 } 3253 3254 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops, 3255 struct dyn_ftrace *rec) 3256 { 3257 void *ptr; 3258 3259 ptr = arch_ftrace_trampoline_func(ops, rec); 3260 if (ptr) 3261 seq_printf(m, " ->%pS", ptr); 3262 } 3263 3264 static int t_show(struct seq_file *m, void *v) 3265 { 3266 struct ftrace_iterator *iter = m->private; 3267 struct dyn_ftrace *rec; 3268 3269 if (iter->flags & FTRACE_ITER_HASH) 3270 return t_hash_show(m, iter); 3271 3272 if (iter->flags & FTRACE_ITER_PRINTALL) { 3273 if (iter->flags & FTRACE_ITER_NOTRACE) 3274 seq_puts(m, "#### no functions disabled ####\n"); 3275 else 3276 seq_puts(m, "#### all functions enabled ####\n"); 3277 return 0; 3278 } 3279 3280 rec = iter->func; 3281 3282 if (!rec) 3283 return 0; 3284 3285 seq_printf(m, "%ps", (void *)rec->ip); 3286 if (iter->flags & FTRACE_ITER_ENABLED) { 3287 struct ftrace_ops *ops; 3288 3289 seq_printf(m, " (%ld)%s%s", 3290 ftrace_rec_count(rec), 3291 rec->flags & FTRACE_FL_REGS ? 
" R" : " ", 3292 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " "); 3293 if (rec->flags & FTRACE_FL_TRAMP_EN) { 3294 ops = ftrace_find_tramp_ops_any(rec); 3295 if (ops) { 3296 do { 3297 seq_printf(m, "\ttramp: %pS (%pS)", 3298 (void *)ops->trampoline, 3299 (void *)ops->func); 3300 add_trampoline_func(m, ops, rec); 3301 ops = ftrace_find_tramp_ops_next(rec, ops); 3302 } while (ops); 3303 } else 3304 seq_puts(m, "\ttramp: ERROR!"); 3305 } else { 3306 add_trampoline_func(m, NULL, rec); 3307 } 3308 } 3309 3310 seq_putc(m, '\n'); 3311 3312 return 0; 3313 } 3314 3315 static const struct seq_operations show_ftrace_seq_ops = { 3316 .start = t_start, 3317 .next = t_next, 3318 .stop = t_stop, 3319 .show = t_show, 3320 }; 3321 3322 static int 3323 ftrace_avail_open(struct inode *inode, struct file *file) 3324 { 3325 struct ftrace_iterator *iter; 3326 3327 if (unlikely(ftrace_disabled)) 3328 return -ENODEV; 3329 3330 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 3331 if (iter) { 3332 iter->pg = ftrace_pages_start; 3333 iter->ops = &global_ops; 3334 } 3335 3336 return iter ? 0 : -ENOMEM; 3337 } 3338 3339 static int 3340 ftrace_enabled_open(struct inode *inode, struct file *file) 3341 { 3342 struct ftrace_iterator *iter; 3343 3344 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 3345 if (iter) { 3346 iter->pg = ftrace_pages_start; 3347 iter->flags = FTRACE_ITER_ENABLED; 3348 iter->ops = &global_ops; 3349 } 3350 3351 return iter ? 0 : -ENOMEM; 3352 } 3353 3354 /** 3355 * ftrace_regex_open - initialize function tracer filter files 3356 * @ops: The ftrace_ops that hold the hash filters 3357 * @flag: The type of filter to process 3358 * @inode: The inode, usually passed in to your open routine 3359 * @file: The file, usually passed in to your open routine 3360 * 3361 * ftrace_regex_open() initializes the filter files for the 3362 * @ops. Depending on @flag it may process the filter hash or 3363 * the notrace hash of @ops. With this called from the open 3364 * routine, you can use ftrace_filter_write() for the write 3365 * routine if @flag has FTRACE_ITER_FILTER set, or 3366 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. 3367 * tracing_lseek() should be used as the lseek routine, and 3368 * release must call ftrace_regex_release(). 
3369 */ 3370 int 3371 ftrace_regex_open(struct ftrace_ops *ops, int flag, 3372 struct inode *inode, struct file *file) 3373 { 3374 struct ftrace_iterator *iter; 3375 struct ftrace_hash *hash; 3376 int ret = 0; 3377 3378 ftrace_ops_init(ops); 3379 3380 if (unlikely(ftrace_disabled)) 3381 return -ENODEV; 3382 3383 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 3384 if (!iter) 3385 return -ENOMEM; 3386 3387 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) { 3388 kfree(iter); 3389 return -ENOMEM; 3390 } 3391 3392 iter->ops = ops; 3393 iter->flags = flag; 3394 3395 mutex_lock(&ops->func_hash->regex_lock); 3396 3397 if (flag & FTRACE_ITER_NOTRACE) 3398 hash = ops->func_hash->notrace_hash; 3399 else 3400 hash = ops->func_hash->filter_hash; 3401 3402 if (file->f_mode & FMODE_WRITE) { 3403 const int size_bits = FTRACE_HASH_DEFAULT_BITS; 3404 3405 if (file->f_flags & O_TRUNC) 3406 iter->hash = alloc_ftrace_hash(size_bits); 3407 else 3408 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); 3409 3410 if (!iter->hash) { 3411 trace_parser_put(&iter->parser); 3412 kfree(iter); 3413 ret = -ENOMEM; 3414 goto out_unlock; 3415 } 3416 } 3417 3418 if (file->f_mode & FMODE_READ) { 3419 iter->pg = ftrace_pages_start; 3420 3421 ret = seq_open(file, &show_ftrace_seq_ops); 3422 if (!ret) { 3423 struct seq_file *m = file->private_data; 3424 m->private = iter; 3425 } else { 3426 /* Failed */ 3427 free_ftrace_hash(iter->hash); 3428 trace_parser_put(&iter->parser); 3429 kfree(iter); 3430 } 3431 } else 3432 file->private_data = iter; 3433 3434 out_unlock: 3435 mutex_unlock(&ops->func_hash->regex_lock); 3436 3437 return ret; 3438 } 3439 3440 static int 3441 ftrace_filter_open(struct inode *inode, struct file *file) 3442 { 3443 struct ftrace_ops *ops = inode->i_private; 3444 3445 return ftrace_regex_open(ops, 3446 FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH, 3447 inode, file); 3448 } 3449 3450 static int 3451 ftrace_notrace_open(struct inode *inode, struct file *file) 3452 { 3453 struct ftrace_ops *ops = inode->i_private; 3454 3455 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, 3456 inode, file); 3457 } 3458 3459 /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */ 3460 struct ftrace_glob { 3461 char *search; 3462 unsigned len; 3463 int type; 3464 }; 3465 3466 /* 3467 * If symbols in an architecture don't correspond exactly to the user-visible 3468 * name of what they represent, it is possible to define this function to 3469 * perform the necessary adjustments. 
 */
char * __weak arch_ftrace_match_adjust(char *str, const char *search)
{
	return str;
}

static int ftrace_match(char *str, struct ftrace_glob *g)
{
	int matched = 0;
	int slen;

	str = arch_ftrace_match_adjust(str, g->search);

	switch (g->type) {
	case MATCH_FULL:
		if (strcmp(str, g->search) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, g->search, g->len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, g->search))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		slen = strlen(str);
		if (slen >= g->len &&
		    memcmp(str + slen - g->len, g->search, g->len) == 0)
			matched = 1;
		break;
	}

	return matched;
}
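
/*
 * Examples of how the glob types above behave (illustrative):
 *
 *	"sys_open"  MATCH_FULL         matches only "sys_open"
 *	"sys_*"     MATCH_FRONT_ONLY   matches "sys_open", "sys_read", ...
 *	"*_lock"    MATCH_END_ONLY     matches "rcu_read_lock", ...
 *	"*spin*"    MATCH_MIDDLE_ONLY  matches anything containing "spin"
 *
 * The type and the stripped search string come from filter_parse_regex().
 */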
static int
enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
{
	struct ftrace_func_entry *entry;
	int ret = 0;

	entry = ftrace_lookup_ip(hash, rec->ip);
	if (clear_filter) {
		/* Do nothing if it doesn't exist */
		if (!entry)
			return 0;

		free_hash_entry(hash, entry);
	} else {
		/* Do nothing if it exists */
		if (entry)
			return 0;

		ret = add_hash_entry(hash, rec->ip);
	}
	return ret;
}

static int
ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
		    struct ftrace_glob *mod_g, int exclude_mod)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (mod_g) {
		int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;

		/* blank module name to match all modules */
		if (!mod_g->len) {
			/* blank module globbing: modname xor exclude_mod */
			if ((!exclude_mod) != (!modname))
				goto func_match;
			return 0;
		}

		/* not matching the module */
		if (!modname || !mod_matches) {
			if (exclude_mod)
				goto func_match;
			else
				return 0;
		}

		if (mod_matches && exclude_mod)
			return 0;

func_match:
		/* blank search means to match all funcs in the mod */
		if (!func_g->len)
			return 1;
	}

	return ftrace_match(str, func_g);
}

static int
match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct ftrace_glob func_g = { .type = MATCH_FULL };
	struct ftrace_glob mod_g = { .type = MATCH_FULL };
	struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
	int exclude_mod = 0;
	int found = 0;
	int ret;
	int clear_filter;

	if (func) {
		func_g.type = filter_parse_regex(func, len, &func_g.search,
						 &clear_filter);
		func_g.len = strlen(func_g.search);
	}

	if (mod) {
		mod_g.type = filter_parse_regex(mod, strlen(mod),
						&mod_g.search, &exclude_mod);
		mod_g.len = strlen(mod_g.search);
	}

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {
		if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
			ret = enter_record(hash, rec, clear_filter);
			if (ret < 0) {
				found = ret;
				goto out_unlock;
			}
			found = 1;
		}
	} while_for_each_ftrace_rec();
 out_unlock:
	mutex_unlock(&ftrace_lock);

	return found;
}

static int
ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
{
	return match_records(hash, buff, len, NULL);
}


/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(struct ftrace_hash *hash,
		    char *func, char *cmd, char *module, int enable)
{
	int ret;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */
	ret = match_records(hash, func, strlen(func), module);
	if (!ret)
		return -EINVAL;
	if (ret < 0)
		return ret;
	return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
core_initcall(ftrace_mod_cmd_init);

static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent an RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
3678 */ 3679 preempt_disable_notrace(); 3680 hlist_for_each_entry_rcu_notrace(entry, hhd, node) { 3681 if (entry->ip == ip) 3682 entry->ops->func(ip, parent_ip, &entry->data); 3683 } 3684 preempt_enable_notrace(); 3685 } 3686 3687 static struct ftrace_ops trace_probe_ops __read_mostly = 3688 { 3689 .func = function_trace_probe_call, 3690 .flags = FTRACE_OPS_FL_INITIALIZED, 3691 INIT_OPS_HASH(trace_probe_ops) 3692 }; 3693 3694 static int ftrace_probe_registered; 3695 3696 static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash) 3697 { 3698 int ret; 3699 int i; 3700 3701 if (ftrace_probe_registered) { 3702 /* still need to update the function call sites */ 3703 if (ftrace_enabled) 3704 ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS, 3705 old_hash); 3706 return; 3707 } 3708 3709 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { 3710 struct hlist_head *hhd = &ftrace_func_hash[i]; 3711 if (hhd->first) 3712 break; 3713 } 3714 /* Nothing registered? */ 3715 if (i == FTRACE_FUNC_HASHSIZE) 3716 return; 3717 3718 ret = ftrace_startup(&trace_probe_ops, 0); 3719 3720 ftrace_probe_registered = 1; 3721 } 3722 3723 static void __disable_ftrace_function_probe(void) 3724 { 3725 int i; 3726 3727 if (!ftrace_probe_registered) 3728 return; 3729 3730 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { 3731 struct hlist_head *hhd = &ftrace_func_hash[i]; 3732 if (hhd->first) 3733 return; 3734 } 3735 3736 /* no more funcs left */ 3737 ftrace_shutdown(&trace_probe_ops, 0); 3738 3739 ftrace_probe_registered = 0; 3740 } 3741 3742 3743 static void ftrace_free_entry(struct ftrace_func_probe *entry) 3744 { 3745 if (entry->ops->free) 3746 entry->ops->free(entry->ops, entry->ip, &entry->data); 3747 kfree(entry); 3748 } 3749 3750 int 3751 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, 3752 void *data) 3753 { 3754 struct ftrace_ops_hash old_hash_ops; 3755 struct ftrace_func_probe *entry; 3756 struct ftrace_glob func_g; 3757 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; 3758 struct ftrace_hash *old_hash = *orig_hash; 3759 struct ftrace_hash *hash; 3760 struct ftrace_page *pg; 3761 struct dyn_ftrace *rec; 3762 int not; 3763 unsigned long key; 3764 int count = 0; 3765 int ret; 3766 3767 func_g.type = filter_parse_regex(glob, strlen(glob), 3768 &func_g.search, ¬); 3769 func_g.len = strlen(func_g.search); 3770 3771 /* we do not support '!' for function probes */ 3772 if (WARN_ON(not)) 3773 return -EINVAL; 3774 3775 mutex_lock(&trace_probe_ops.func_hash->regex_lock); 3776 3777 old_hash_ops.filter_hash = old_hash; 3778 /* Probes only have filters */ 3779 old_hash_ops.notrace_hash = NULL; 3780 3781 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 3782 if (!hash) { 3783 count = -ENOMEM; 3784 goto out; 3785 } 3786 3787 if (unlikely(ftrace_disabled)) { 3788 count = -ENODEV; 3789 goto out; 3790 } 3791 3792 mutex_lock(&ftrace_lock); 3793 3794 do_for_each_ftrace_rec(pg, rec) { 3795 3796 if (!ftrace_match_record(rec, &func_g, NULL, 0)) 3797 continue; 3798 3799 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 3800 if (!entry) { 3801 /* If we did not process any, then return error */ 3802 if (!count) 3803 count = -ENOMEM; 3804 goto out_unlock; 3805 } 3806 3807 count++; 3808 3809 entry->data = data; 3810 3811 /* 3812 * The caller might want to do something special 3813 * for each function we find. We call the callback 3814 * to give the caller an opportunity to do so. 
3815 */ 3816 if (ops->init) { 3817 if (ops->init(ops, rec->ip, &entry->data) < 0) { 3818 /* caller does not like this func */ 3819 kfree(entry); 3820 continue; 3821 } 3822 } 3823 3824 ret = enter_record(hash, rec, 0); 3825 if (ret < 0) { 3826 kfree(entry); 3827 count = ret; 3828 goto out_unlock; 3829 } 3830 3831 entry->ops = ops; 3832 entry->ip = rec->ip; 3833 3834 key = hash_long(entry->ip, FTRACE_HASH_BITS); 3835 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]); 3836 3837 } while_for_each_ftrace_rec(); 3838 3839 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); 3840 3841 __enable_ftrace_function_probe(&old_hash_ops); 3842 3843 if (!ret) 3844 free_ftrace_hash_rcu(old_hash); 3845 else 3846 count = ret; 3847 3848 out_unlock: 3849 mutex_unlock(&ftrace_lock); 3850 out: 3851 mutex_unlock(&trace_probe_ops.func_hash->regex_lock); 3852 free_ftrace_hash(hash); 3853 3854 return count; 3855 } 3856 3857 enum { 3858 PROBE_TEST_FUNC = 1, 3859 PROBE_TEST_DATA = 2 3860 }; 3861 3862 static void 3863 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, 3864 void *data, int flags) 3865 { 3866 struct ftrace_func_entry *rec_entry; 3867 struct ftrace_func_probe *entry; 3868 struct ftrace_func_probe *p; 3869 struct ftrace_glob func_g; 3870 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; 3871 struct ftrace_hash *old_hash = *orig_hash; 3872 struct list_head free_list; 3873 struct ftrace_hash *hash; 3874 struct hlist_node *tmp; 3875 char str[KSYM_SYMBOL_LEN]; 3876 int i, ret; 3877 3878 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) 3879 func_g.search = NULL; 3880 else if (glob) { 3881 int not; 3882 3883 func_g.type = filter_parse_regex(glob, strlen(glob), 3884 &func_g.search, ¬); 3885 func_g.len = strlen(func_g.search); 3886 func_g.search = glob; 3887 3888 /* we do not support '!' for function probes */ 3889 if (WARN_ON(not)) 3890 return; 3891 } 3892 3893 mutex_lock(&trace_probe_ops.func_hash->regex_lock); 3894 3895 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 3896 if (!hash) 3897 /* Hmm, should report this somehow */ 3898 goto out_unlock; 3899 3900 INIT_LIST_HEAD(&free_list); 3901 3902 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { 3903 struct hlist_head *hhd = &ftrace_func_hash[i]; 3904 3905 hlist_for_each_entry_safe(entry, tmp, hhd, node) { 3906 3907 /* break up if statements for readability */ 3908 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops) 3909 continue; 3910 3911 if ((flags & PROBE_TEST_DATA) && entry->data != data) 3912 continue; 3913 3914 /* do this last, since it is the most expensive */ 3915 if (func_g.search) { 3916 kallsyms_lookup(entry->ip, NULL, NULL, 3917 NULL, str); 3918 if (!ftrace_match(str, &func_g)) 3919 continue; 3920 } 3921 3922 rec_entry = ftrace_lookup_ip(hash, entry->ip); 3923 /* It is possible more than one entry had this ip */ 3924 if (rec_entry) 3925 free_hash_entry(hash, rec_entry); 3926 3927 hlist_del_rcu(&entry->node); 3928 list_add(&entry->free_list, &free_list); 3929 } 3930 } 3931 mutex_lock(&ftrace_lock); 3932 __disable_ftrace_function_probe(); 3933 /* 3934 * Remove after the disable is called. Otherwise, if the last 3935 * probe is removed, a null hash means *all enabled*. 
3936 */ 3937 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); 3938 synchronize_sched(); 3939 if (!ret) 3940 free_ftrace_hash_rcu(old_hash); 3941 3942 list_for_each_entry_safe(entry, p, &free_list, free_list) { 3943 list_del(&entry->free_list); 3944 ftrace_free_entry(entry); 3945 } 3946 mutex_unlock(&ftrace_lock); 3947 3948 out_unlock: 3949 mutex_unlock(&trace_probe_ops.func_hash->regex_lock); 3950 free_ftrace_hash(hash); 3951 } 3952 3953 void 3954 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, 3955 void *data) 3956 { 3957 __unregister_ftrace_function_probe(glob, ops, data, 3958 PROBE_TEST_FUNC | PROBE_TEST_DATA); 3959 } 3960 3961 void 3962 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) 3963 { 3964 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC); 3965 } 3966 3967 void unregister_ftrace_function_probe_all(char *glob) 3968 { 3969 __unregister_ftrace_function_probe(glob, NULL, NULL, 0); 3970 } 3971 3972 static LIST_HEAD(ftrace_commands); 3973 static DEFINE_MUTEX(ftrace_cmd_mutex); 3974 3975 /* 3976 * Currently we only register ftrace commands from __init, so mark this 3977 * __init too. 3978 */ 3979 __init int register_ftrace_command(struct ftrace_func_command *cmd) 3980 { 3981 struct ftrace_func_command *p; 3982 int ret = 0; 3983 3984 mutex_lock(&ftrace_cmd_mutex); 3985 list_for_each_entry(p, &ftrace_commands, list) { 3986 if (strcmp(cmd->name, p->name) == 0) { 3987 ret = -EBUSY; 3988 goto out_unlock; 3989 } 3990 } 3991 list_add(&cmd->list, &ftrace_commands); 3992 out_unlock: 3993 mutex_unlock(&ftrace_cmd_mutex); 3994 3995 return ret; 3996 } 3997 3998 /* 3999 * Currently we only unregister ftrace commands from __init, so mark 4000 * this __init too. 4001 */ 4002 __init int unregister_ftrace_command(struct ftrace_func_command *cmd) 4003 { 4004 struct ftrace_func_command *p, *n; 4005 int ret = -ENODEV; 4006 4007 mutex_lock(&ftrace_cmd_mutex); 4008 list_for_each_entry_safe(p, n, &ftrace_commands, list) { 4009 if (strcmp(cmd->name, p->name) == 0) { 4010 ret = 0; 4011 list_del_init(&p->list); 4012 goto out_unlock; 4013 } 4014 } 4015 out_unlock: 4016 mutex_unlock(&ftrace_cmd_mutex); 4017 4018 return ret; 4019 } 4020 4021 static int ftrace_process_regex(struct ftrace_hash *hash, 4022 char *buff, int len, int enable) 4023 { 4024 char *func, *command, *next = buff; 4025 struct ftrace_func_command *p; 4026 int ret = -EINVAL; 4027 4028 func = strsep(&next, ":"); 4029 4030 if (!next) { 4031 ret = ftrace_match_records(hash, func, len); 4032 if (!ret) 4033 ret = -EINVAL; 4034 if (ret < 0) 4035 return ret; 4036 return 0; 4037 } 4038 4039 /* command found */ 4040 4041 command = strsep(&next, ":"); 4042 4043 mutex_lock(&ftrace_cmd_mutex); 4044 list_for_each_entry(p, &ftrace_commands, list) { 4045 if (strcmp(p->name, command) == 0) { 4046 ret = p->func(hash, func, command, next, enable); 4047 goto out_unlock; 4048 } 4049 } 4050 out_unlock: 4051 mutex_unlock(&ftrace_cmd_mutex); 4052 4053 return ret; 4054 } 4055 4056 static ssize_t 4057 ftrace_regex_write(struct file *file, const char __user *ubuf, 4058 size_t cnt, loff_t *ppos, int enable) 4059 { 4060 struct ftrace_iterator *iter; 4061 struct trace_parser *parser; 4062 ssize_t ret, read; 4063 4064 if (!cnt) 4065 return 0; 4066 4067 if (file->f_mode & FMODE_READ) { 4068 struct seq_file *m = file->private_data; 4069 iter = m->private; 4070 } else 4071 iter = file->private_data; 4072 4073 if (unlikely(ftrace_disabled)) 4074 return -ENODEV; 4075 4076 /* iter->hash is a 
local copy, so we don't need regex_lock */ 4077 4078 parser = &iter->parser; 4079 read = trace_get_user(parser, ubuf, cnt, ppos); 4080 4081 if (read >= 0 && trace_parser_loaded(parser) && 4082 !trace_parser_cont(parser)) { 4083 ret = ftrace_process_regex(iter->hash, parser->buffer, 4084 parser->idx, enable); 4085 trace_parser_clear(parser); 4086 if (ret < 0) 4087 goto out; 4088 } 4089 4090 ret = read; 4091 out: 4092 return ret; 4093 } 4094 4095 ssize_t 4096 ftrace_filter_write(struct file *file, const char __user *ubuf, 4097 size_t cnt, loff_t *ppos) 4098 { 4099 return ftrace_regex_write(file, ubuf, cnt, ppos, 1); 4100 } 4101 4102 ssize_t 4103 ftrace_notrace_write(struct file *file, const char __user *ubuf, 4104 size_t cnt, loff_t *ppos) 4105 { 4106 return ftrace_regex_write(file, ubuf, cnt, ppos, 0); 4107 } 4108 4109 static int 4110 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) 4111 { 4112 struct ftrace_func_entry *entry; 4113 4114 if (!ftrace_location(ip)) 4115 return -EINVAL; 4116 4117 if (remove) { 4118 entry = ftrace_lookup_ip(hash, ip); 4119 if (!entry) 4120 return -ENOENT; 4121 free_hash_entry(hash, entry); 4122 return 0; 4123 } 4124 4125 return add_hash_entry(hash, ip); 4126 } 4127 4128 static void ftrace_ops_update_code(struct ftrace_ops *ops, 4129 struct ftrace_ops_hash *old_hash) 4130 { 4131 struct ftrace_ops *op; 4132 4133 if (!ftrace_enabled) 4134 return; 4135 4136 if (ops->flags & FTRACE_OPS_FL_ENABLED) { 4137 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); 4138 return; 4139 } 4140 4141 /* 4142 * If this is the shared global_ops filter, then we need to 4143 * check if there is another ops that shares it, is enabled. 4144 * If so, we still need to run the modify code. 4145 */ 4146 if (ops->func_hash != &global_ops.local_hash) 4147 return; 4148 4149 do_for_each_ftrace_op(op, ftrace_ops_list) { 4150 if (op->func_hash == &global_ops.local_hash && 4151 op->flags & FTRACE_OPS_FL_ENABLED) { 4152 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); 4153 /* Only need to do this once */ 4154 return; 4155 } 4156 } while_for_each_ftrace_op(op); 4157 } 4158 4159 static int 4160 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, 4161 unsigned long ip, int remove, int reset, int enable) 4162 { 4163 struct ftrace_hash **orig_hash; 4164 struct ftrace_ops_hash old_hash_ops; 4165 struct ftrace_hash *old_hash; 4166 struct ftrace_hash *hash; 4167 int ret; 4168 4169 if (unlikely(ftrace_disabled)) 4170 return -ENODEV; 4171 4172 mutex_lock(&ops->func_hash->regex_lock); 4173 4174 if (enable) 4175 orig_hash = &ops->func_hash->filter_hash; 4176 else 4177 orig_hash = &ops->func_hash->notrace_hash; 4178 4179 if (reset) 4180 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 4181 else 4182 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 4183 4184 if (!hash) { 4185 ret = -ENOMEM; 4186 goto out_regex_unlock; 4187 } 4188 4189 if (buf && !ftrace_match_records(hash, buf, len)) { 4190 ret = -EINVAL; 4191 goto out_regex_unlock; 4192 } 4193 if (ip) { 4194 ret = ftrace_match_addr(hash, ip, remove); 4195 if (ret < 0) 4196 goto out_regex_unlock; 4197 } 4198 4199 mutex_lock(&ftrace_lock); 4200 old_hash = *orig_hash; 4201 old_hash_ops.filter_hash = ops->func_hash->filter_hash; 4202 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; 4203 ret = ftrace_hash_move(ops, enable, orig_hash, hash); 4204 if (!ret) { 4205 ftrace_ops_update_code(ops, &old_hash_ops); 4206 free_ftrace_hash_rcu(old_hash); 4207 } 4208 
	mutex_unlock(&ftrace_lock);

 out_regex_unlock:
	mutex_unlock(&ops->func_hash->regex_lock);

	free_ftrace_hash(hash);
	return ret;
}

static int
ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
		int reset, int enable)
{
	return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
}

/**
 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
 * @ops: the ops to set the filter with
 * @ip: the address to add to or remove from the filter.
 * @remove: non zero to remove the ip from the filter
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @ip is NULL, it fails to update the filter.
 */
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_addr(ops, ip, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);

static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
		 int reset, int enable)
{
	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops: the ops to set the filter with
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops: the ops to set the notrace filter with
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);

/**
 * ftrace_set_global_filter - set a function to filter on with global tracers
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4293 */ 4294 void ftrace_set_global_filter(unsigned char *buf, int len, int reset) 4295 { 4296 ftrace_set_regex(&global_ops, buf, len, reset, 1); 4297 } 4298 EXPORT_SYMBOL_GPL(ftrace_set_global_filter); 4299 4300 /** 4301 * ftrace_set_global_notrace - set a function to not trace with global tracers 4302 * @buf - the string that holds the function notrace text. 4303 * @len - the length of the string. 4304 * @reset - non zero to reset all filters before applying this filter. 4305 * 4306 * Notrace Filters denote which functions should not be enabled when tracing 4307 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 4308 * for tracing. 4309 */ 4310 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) 4311 { 4312 ftrace_set_regex(&global_ops, buf, len, reset, 0); 4313 } 4314 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); 4315 4316 /* 4317 * command line interface to allow users to set filters on boot up. 4318 */ 4319 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE 4320 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 4321 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; 4322 4323 /* Used by function selftest to not test if filter is set */ 4324 bool ftrace_filter_param __initdata; 4325 4326 static int __init set_ftrace_notrace(char *str) 4327 { 4328 ftrace_filter_param = true; 4329 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); 4330 return 1; 4331 } 4332 __setup("ftrace_notrace=", set_ftrace_notrace); 4333 4334 static int __init set_ftrace_filter(char *str) 4335 { 4336 ftrace_filter_param = true; 4337 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); 4338 return 1; 4339 } 4340 __setup("ftrace_filter=", set_ftrace_filter); 4341 4342 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 4343 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; 4344 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 4345 static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer); 4346 4347 static unsigned long save_global_trampoline; 4348 static unsigned long save_global_flags; 4349 4350 static int __init set_graph_function(char *str) 4351 { 4352 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); 4353 return 1; 4354 } 4355 __setup("ftrace_graph_filter=", set_graph_function); 4356 4357 static int __init set_graph_notrace_function(char *str) 4358 { 4359 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE); 4360 return 1; 4361 } 4362 __setup("ftrace_graph_notrace=", set_graph_notrace_function); 4363 4364 static void __init set_ftrace_early_graph(char *buf, int enable) 4365 { 4366 int ret; 4367 char *func; 4368 unsigned long *table = ftrace_graph_funcs; 4369 int *count = &ftrace_graph_count; 4370 4371 if (!enable) { 4372 table = ftrace_graph_notrace_funcs; 4373 count = &ftrace_graph_notrace_count; 4374 } 4375 4376 while (buf) { 4377 func = strsep(&buf, ","); 4378 /* we allow only one expression at a time */ 4379 ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func); 4380 if (ret) 4381 printk(KERN_DEBUG "ftrace: function %s not " 4382 "traceable\n", func); 4383 } 4384 } 4385 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 4386 4387 void __init 4388 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable) 4389 { 4390 char *func; 4391 4392 ftrace_ops_init(ops); 4393 4394 while (buf) { 4395 func = strsep(&buf, ","); 4396 ftrace_set_regex(ops, func, strlen(func), 0, enable); 4397 } 4398 } 4399 4400 static void __init set_ftrace_early_filters(void) 4401 { 4402 if (ftrace_filter_buf[0]) 
4403 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1); 4404 if (ftrace_notrace_buf[0]) 4405 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0); 4406 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 4407 if (ftrace_graph_buf[0]) 4408 set_ftrace_early_graph(ftrace_graph_buf, 1); 4409 if (ftrace_graph_notrace_buf[0]) 4410 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0); 4411 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 4412 } 4413 4414 int ftrace_regex_release(struct inode *inode, struct file *file) 4415 { 4416 struct seq_file *m = (struct seq_file *)file->private_data; 4417 struct ftrace_ops_hash old_hash_ops; 4418 struct ftrace_iterator *iter; 4419 struct ftrace_hash **orig_hash; 4420 struct ftrace_hash *old_hash; 4421 struct trace_parser *parser; 4422 int filter_hash; 4423 int ret; 4424 4425 if (file->f_mode & FMODE_READ) { 4426 iter = m->private; 4427 seq_release(inode, file); 4428 } else 4429 iter = file->private_data; 4430 4431 parser = &iter->parser; 4432 if (trace_parser_loaded(parser)) { 4433 parser->buffer[parser->idx] = 0; 4434 ftrace_match_records(iter->hash, parser->buffer, parser->idx); 4435 } 4436 4437 trace_parser_put(parser); 4438 4439 mutex_lock(&iter->ops->func_hash->regex_lock); 4440 4441 if (file->f_mode & FMODE_WRITE) { 4442 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); 4443 4444 if (filter_hash) 4445 orig_hash = &iter->ops->func_hash->filter_hash; 4446 else 4447 orig_hash = &iter->ops->func_hash->notrace_hash; 4448 4449 mutex_lock(&ftrace_lock); 4450 old_hash = *orig_hash; 4451 old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash; 4452 old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash; 4453 ret = ftrace_hash_move(iter->ops, filter_hash, 4454 orig_hash, iter->hash); 4455 if (!ret) { 4456 ftrace_ops_update_code(iter->ops, &old_hash_ops); 4457 free_ftrace_hash_rcu(old_hash); 4458 } 4459 mutex_unlock(&ftrace_lock); 4460 } 4461 4462 mutex_unlock(&iter->ops->func_hash->regex_lock); 4463 free_ftrace_hash(iter->hash); 4464 kfree(iter); 4465 4466 return 0; 4467 } 4468 4469 static const struct file_operations ftrace_avail_fops = { 4470 .open = ftrace_avail_open, 4471 .read = seq_read, 4472 .llseek = seq_lseek, 4473 .release = seq_release_private, 4474 }; 4475 4476 static const struct file_operations ftrace_enabled_fops = { 4477 .open = ftrace_enabled_open, 4478 .read = seq_read, 4479 .llseek = seq_lseek, 4480 .release = seq_release_private, 4481 }; 4482 4483 static const struct file_operations ftrace_filter_fops = { 4484 .open = ftrace_filter_open, 4485 .read = seq_read, 4486 .write = ftrace_filter_write, 4487 .llseek = tracing_lseek, 4488 .release = ftrace_regex_release, 4489 }; 4490 4491 static const struct file_operations ftrace_notrace_fops = { 4492 .open = ftrace_notrace_open, 4493 .read = seq_read, 4494 .write = ftrace_notrace_write, 4495 .llseek = tracing_lseek, 4496 .release = ftrace_regex_release, 4497 }; 4498 4499 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 4500 4501 static DEFINE_MUTEX(graph_lock); 4502 4503 int ftrace_graph_count; 4504 int ftrace_graph_notrace_count; 4505 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; 4506 unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; 4507 4508 struct ftrace_graph_data { 4509 unsigned long *table; 4510 size_t size; 4511 int *count; 4512 const struct seq_operations *seq_ops; 4513 }; 4514 4515 static void * 4516 __g_next(struct seq_file *m, loff_t *pos) 4517 { 4518 struct ftrace_graph_data *fgd = m->private; 4519 4520 if (*pos >= *fgd->count) 4521 
return NULL; 4522 return &fgd->table[*pos]; 4523 } 4524 4525 static void * 4526 g_next(struct seq_file *m, void *v, loff_t *pos) 4527 { 4528 (*pos)++; 4529 return __g_next(m, pos); 4530 } 4531 4532 static void *g_start(struct seq_file *m, loff_t *pos) 4533 { 4534 struct ftrace_graph_data *fgd = m->private; 4535 4536 mutex_lock(&graph_lock); 4537 4538 /* Nothing, tell g_show to print all functions are enabled */ 4539 if (!*fgd->count && !*pos) 4540 return (void *)1; 4541 4542 return __g_next(m, pos); 4543 } 4544 4545 static void g_stop(struct seq_file *m, void *p) 4546 { 4547 mutex_unlock(&graph_lock); 4548 } 4549 4550 static int g_show(struct seq_file *m, void *v) 4551 { 4552 unsigned long *ptr = v; 4553 4554 if (!ptr) 4555 return 0; 4556 4557 if (ptr == (unsigned long *)1) { 4558 struct ftrace_graph_data *fgd = m->private; 4559 4560 if (fgd->table == ftrace_graph_funcs) 4561 seq_puts(m, "#### all functions enabled ####\n"); 4562 else 4563 seq_puts(m, "#### no functions disabled ####\n"); 4564 return 0; 4565 } 4566 4567 seq_printf(m, "%ps\n", (void *)*ptr); 4568 4569 return 0; 4570 } 4571 4572 static const struct seq_operations ftrace_graph_seq_ops = { 4573 .start = g_start, 4574 .next = g_next, 4575 .stop = g_stop, 4576 .show = g_show, 4577 }; 4578 4579 static int 4580 __ftrace_graph_open(struct inode *inode, struct file *file, 4581 struct ftrace_graph_data *fgd) 4582 { 4583 int ret = 0; 4584 4585 mutex_lock(&graph_lock); 4586 if ((file->f_mode & FMODE_WRITE) && 4587 (file->f_flags & O_TRUNC)) { 4588 *fgd->count = 0; 4589 memset(fgd->table, 0, fgd->size * sizeof(*fgd->table)); 4590 } 4591 mutex_unlock(&graph_lock); 4592 4593 if (file->f_mode & FMODE_READ) { 4594 ret = seq_open(file, fgd->seq_ops); 4595 if (!ret) { 4596 struct seq_file *m = file->private_data; 4597 m->private = fgd; 4598 } 4599 } else 4600 file->private_data = fgd; 4601 4602 return ret; 4603 } 4604 4605 static int 4606 ftrace_graph_open(struct inode *inode, struct file *file) 4607 { 4608 struct ftrace_graph_data *fgd; 4609 4610 if (unlikely(ftrace_disabled)) 4611 return -ENODEV; 4612 4613 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 4614 if (fgd == NULL) 4615 return -ENOMEM; 4616 4617 fgd->table = ftrace_graph_funcs; 4618 fgd->size = FTRACE_GRAPH_MAX_FUNCS; 4619 fgd->count = &ftrace_graph_count; 4620 fgd->seq_ops = &ftrace_graph_seq_ops; 4621 4622 return __ftrace_graph_open(inode, file, fgd); 4623 } 4624 4625 static int 4626 ftrace_graph_notrace_open(struct inode *inode, struct file *file) 4627 { 4628 struct ftrace_graph_data *fgd; 4629 4630 if (unlikely(ftrace_disabled)) 4631 return -ENODEV; 4632 4633 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 4634 if (fgd == NULL) 4635 return -ENOMEM; 4636 4637 fgd->table = ftrace_graph_notrace_funcs; 4638 fgd->size = FTRACE_GRAPH_MAX_FUNCS; 4639 fgd->count = &ftrace_graph_notrace_count; 4640 fgd->seq_ops = &ftrace_graph_seq_ops; 4641 4642 return __ftrace_graph_open(inode, file, fgd); 4643 } 4644 4645 static int 4646 ftrace_graph_release(struct inode *inode, struct file *file) 4647 { 4648 if (file->f_mode & FMODE_READ) { 4649 struct seq_file *m = file->private_data; 4650 4651 kfree(m->private); 4652 seq_release(inode, file); 4653 } else { 4654 kfree(file->private_data); 4655 } 4656 4657 return 0; 4658 } 4659 4660 static int 4661 ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer) 4662 { 4663 struct ftrace_glob func_g; 4664 struct dyn_ftrace *rec; 4665 struct ftrace_page *pg; 4666 int fail = 1; 4667 int not; 4668 bool exists; 4669 int i; 4670 4671 /* decode regex */ 4672 
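/* (filter_parse_regex() also strips a leading '!' from the glob and
 * reports it through the 'not' flag, which turns this call into a
 * removal from the array below.) */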
func_g.type = filter_parse_regex(buffer, strlen(buffer),
4673 &func_g.search, &not);
4674 if (!not && *idx >= size)
4675 return -EBUSY;
4676
4677 func_g.len = strlen(func_g.search);
4678
4679 mutex_lock(&ftrace_lock);
4680
4681 if (unlikely(ftrace_disabled)) {
4682 mutex_unlock(&ftrace_lock);
4683 return -ENODEV;
4684 }
4685
4686 do_for_each_ftrace_rec(pg, rec) {
4687
4688 if (ftrace_match_record(rec, &func_g, NULL, 0)) {
4689 /* if it is in the array */
4690 exists = false;
4691 for (i = 0; i < *idx; i++) {
4692 if (array[i] == rec->ip) {
4693 exists = true;
4694 break;
4695 }
4696 }
4697
4698 if (!not) {
4699 fail = 0;
4700 if (!exists) {
4701 array[(*idx)++] = rec->ip;
4702 if (*idx >= size)
4703 goto out;
4704 }
4705 } else {
4706 if (exists) {
4707 array[i] = array[--(*idx)];
4708 array[*idx] = 0;
4709 fail = 0;
4710 }
4711 }
4712 }
4713 } while_for_each_ftrace_rec();
4714 out:
4715 mutex_unlock(&ftrace_lock);
4716
4717 if (fail)
4718 return -EINVAL;
4719
4720 return 0;
4721 }
4722
4723 static ssize_t
4724 ftrace_graph_write(struct file *file, const char __user *ubuf,
4725 size_t cnt, loff_t *ppos)
4726 {
4727 struct trace_parser parser;
4728 ssize_t read, ret = 0;
4729 struct ftrace_graph_data *fgd = file->private_data;
4730
4731 if (!cnt)
4732 return 0;
4733
4734 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
4735 return -ENOMEM;
4736
4737 read = trace_get_user(&parser, ubuf, cnt, ppos);
4738
4739 if (read >= 0 && trace_parser_loaded((&parser))) {
4740 parser.buffer[parser.idx] = 0;
4741
4742 mutex_lock(&graph_lock);
4743
4744 /* we allow only one expression at a time */
4745 ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
4746 parser.buffer);
4747
4748 mutex_unlock(&graph_lock);
4749 }
4750
4751 if (!ret)
4752 ret = read;
4753
4754 trace_parser_put(&parser);
4755
4756 return ret;
4757 }
4758
4759 static const struct file_operations ftrace_graph_fops = {
4760 .open = ftrace_graph_open,
4761 .read = seq_read,
4762 .write = ftrace_graph_write,
4763 .llseek = tracing_lseek,
4764 .release = ftrace_graph_release,
4765 };
4766
4767 static const struct file_operations ftrace_graph_notrace_fops = {
4768 .open = ftrace_graph_notrace_open,
4769 .read = seq_read,
4770 .write = ftrace_graph_write,
4771 .llseek = tracing_lseek,
4772 .release = ftrace_graph_release,
4773 };
4774 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4775
4776 void ftrace_create_filter_files(struct ftrace_ops *ops,
4777 struct dentry *parent)
4778 {
4779
4780 trace_create_file("set_ftrace_filter", 0644, parent,
4781 ops, &ftrace_filter_fops);
4782
4783 trace_create_file("set_ftrace_notrace", 0644, parent,
4784 ops, &ftrace_notrace_fops);
4785 }
4786
4787 /*
4788 * The name "destroy_filter_files" is really a misnomer. Although
4789 * in the future it may actually delete the files, for now it is
4790 * really intended to make sure the ops passed in are disabled
4791 * and that when this function returns, the caller is free to
4792 * free the ops.
4793 *
4794 * The "destroy" name is only to match the "create" name that this
4795 * should be paired with.
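 *
 * A typical (illustrative) pairing from a tracer's setup and teardown
 * paths, with hypothetical my_ops and my_dir:
 *
 *	ftrace_create_filter_files(&my_ops, my_dir);
 *	...
 *	ftrace_destroy_filter_files(&my_ops);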
4796 */ 4797 void ftrace_destroy_filter_files(struct ftrace_ops *ops) 4798 { 4799 mutex_lock(&ftrace_lock); 4800 if (ops->flags & FTRACE_OPS_FL_ENABLED) 4801 ftrace_shutdown(ops, 0); 4802 ops->flags |= FTRACE_OPS_FL_DELETED; 4803 mutex_unlock(&ftrace_lock); 4804 } 4805 4806 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer) 4807 { 4808 4809 trace_create_file("available_filter_functions", 0444, 4810 d_tracer, NULL, &ftrace_avail_fops); 4811 4812 trace_create_file("enabled_functions", 0444, 4813 d_tracer, NULL, &ftrace_enabled_fops); 4814 4815 ftrace_create_filter_files(&global_ops, d_tracer); 4816 4817 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 4818 trace_create_file("set_graph_function", 0444, d_tracer, 4819 NULL, 4820 &ftrace_graph_fops); 4821 trace_create_file("set_graph_notrace", 0444, d_tracer, 4822 NULL, 4823 &ftrace_graph_notrace_fops); 4824 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 4825 4826 return 0; 4827 } 4828 4829 static int ftrace_cmp_ips(const void *a, const void *b) 4830 { 4831 const unsigned long *ipa = a; 4832 const unsigned long *ipb = b; 4833 4834 if (*ipa > *ipb) 4835 return 1; 4836 if (*ipa < *ipb) 4837 return -1; 4838 return 0; 4839 } 4840 4841 static int ftrace_process_locs(struct module *mod, 4842 unsigned long *start, 4843 unsigned long *end) 4844 { 4845 struct ftrace_page *start_pg; 4846 struct ftrace_page *pg; 4847 struct dyn_ftrace *rec; 4848 unsigned long count; 4849 unsigned long *p; 4850 unsigned long addr; 4851 unsigned long flags = 0; /* Shut up gcc */ 4852 int ret = -ENOMEM; 4853 4854 count = end - start; 4855 4856 if (!count) 4857 return 0; 4858 4859 sort(start, count, sizeof(*start), 4860 ftrace_cmp_ips, NULL); 4861 4862 start_pg = ftrace_allocate_pages(count); 4863 if (!start_pg) 4864 return -ENOMEM; 4865 4866 mutex_lock(&ftrace_lock); 4867 4868 /* 4869 * Core and each module needs their own pages, as 4870 * modules will free them when they are removed. 4871 * Force a new page to be allocated for modules. 4872 */ 4873 if (!mod) { 4874 WARN_ON(ftrace_pages || ftrace_pages_start); 4875 /* First initialization */ 4876 ftrace_pages = ftrace_pages_start = start_pg; 4877 } else { 4878 if (!ftrace_pages) 4879 goto out; 4880 4881 if (WARN_ON(ftrace_pages->next)) { 4882 /* Hmm, we have free pages? */ 4883 while (ftrace_pages->next) 4884 ftrace_pages = ftrace_pages->next; 4885 } 4886 4887 ftrace_pages->next = start_pg; 4888 } 4889 4890 p = start; 4891 pg = start_pg; 4892 while (p < end) { 4893 addr = ftrace_call_adjust(*p++); 4894 /* 4895 * Some architecture linkers will pad between 4896 * the different mcount_loc sections of different 4897 * object files to satisfy alignments. 4898 * Skip any NULL pointers. 4899 */ 4900 if (!addr) 4901 continue; 4902 4903 if (pg->index == pg->size) { 4904 /* We should have allocated enough */ 4905 if (WARN_ON(!pg->next)) 4906 break; 4907 pg = pg->next; 4908 } 4909 4910 rec = &pg->records[pg->index++]; 4911 rec->ip = addr; 4912 } 4913 4914 /* We should have used all pages */ 4915 WARN_ON(pg->next); 4916 4917 /* Assign the last page to ftrace_pages */ 4918 ftrace_pages = pg; 4919 4920 /* 4921 * We only need to disable interrupts on start up 4922 * because we are modifying code that an interrupt 4923 * may execute, and the modification is not atomic. 4924 * But for modules, nothing runs the code we modify 4925 * until we are finished with it, and there's no 4926 * reason to cause large interrupt latencies while we do it. 
4927 */ 4928 if (!mod) 4929 local_irq_save(flags); 4930 ftrace_update_code(mod, start_pg); 4931 if (!mod) 4932 local_irq_restore(flags); 4933 ret = 0; 4934 out: 4935 mutex_unlock(&ftrace_lock); 4936 4937 return ret; 4938 } 4939 4940 #ifdef CONFIG_MODULES 4941 4942 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) 4943 4944 static int referenced_filters(struct dyn_ftrace *rec) 4945 { 4946 struct ftrace_ops *ops; 4947 int cnt = 0; 4948 4949 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { 4950 if (ops_references_rec(ops, rec)) 4951 cnt++; 4952 } 4953 4954 return cnt; 4955 } 4956 4957 void ftrace_release_mod(struct module *mod) 4958 { 4959 struct dyn_ftrace *rec; 4960 struct ftrace_page **last_pg; 4961 struct ftrace_page *pg; 4962 int order; 4963 4964 mutex_lock(&ftrace_lock); 4965 4966 if (ftrace_disabled) 4967 goto out_unlock; 4968 4969 /* 4970 * Each module has its own ftrace_pages, remove 4971 * them from the list. 4972 */ 4973 last_pg = &ftrace_pages_start; 4974 for (pg = ftrace_pages_start; pg; pg = *last_pg) { 4975 rec = &pg->records[0]; 4976 if (within_module_core(rec->ip, mod)) { 4977 /* 4978 * As core pages are first, the first 4979 * page should never be a module page. 4980 */ 4981 if (WARN_ON(pg == ftrace_pages_start)) 4982 goto out_unlock; 4983 4984 /* Check if we are deleting the last page */ 4985 if (pg == ftrace_pages) 4986 ftrace_pages = next_to_ftrace_page(last_pg); 4987 4988 *last_pg = pg->next; 4989 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 4990 free_pages((unsigned long)pg->records, order); 4991 kfree(pg); 4992 } else 4993 last_pg = &pg->next; 4994 } 4995 out_unlock: 4996 mutex_unlock(&ftrace_lock); 4997 } 4998 4999 void ftrace_module_enable(struct module *mod) 5000 { 5001 struct dyn_ftrace *rec; 5002 struct ftrace_page *pg; 5003 5004 mutex_lock(&ftrace_lock); 5005 5006 if (ftrace_disabled) 5007 goto out_unlock; 5008 5009 /* 5010 * If the tracing is enabled, go ahead and enable the record. 5011 * 5012 * The reason not to enable the record immediatelly is the 5013 * inherent check of ftrace_make_nop/ftrace_make_call for 5014 * correct previous instructions. Making first the NOP 5015 * conversion puts the module to the correct state, thus 5016 * passing the ftrace_make_call check. 5017 * 5018 * We also delay this to after the module code already set the 5019 * text to read-only, as we now need to set it back to read-write 5020 * so that we can modify the text. 5021 */ 5022 if (ftrace_start_up) 5023 ftrace_arch_code_modify_prepare(); 5024 5025 do_for_each_ftrace_rec(pg, rec) { 5026 int cnt; 5027 /* 5028 * do_for_each_ftrace_rec() is a double loop. 5029 * module text shares the pg. If a record is 5030 * not part of this module, then skip this pg, 5031 * which the "break" will do. 5032 */ 5033 if (!within_module_core(rec->ip, mod)) 5034 break; 5035 5036 cnt = 0; 5037 5038 /* 5039 * When adding a module, we need to check if tracers are 5040 * currently enabled and if they are, and can trace this record, 5041 * we need to enable the module functions as well as update the 5042 * reference counts for those function records. 
5043 */ 5044 if (ftrace_start_up) 5045 cnt += referenced_filters(rec); 5046 5047 /* This clears FTRACE_FL_DISABLED */ 5048 rec->flags = cnt; 5049 5050 if (ftrace_start_up && cnt) { 5051 int failed = __ftrace_replace_code(rec, 1); 5052 if (failed) { 5053 ftrace_bug(failed, rec); 5054 goto out_loop; 5055 } 5056 } 5057 5058 } while_for_each_ftrace_rec(); 5059 5060 out_loop: 5061 if (ftrace_start_up) 5062 ftrace_arch_code_modify_post_process(); 5063 5064 out_unlock: 5065 mutex_unlock(&ftrace_lock); 5066 } 5067 5068 void ftrace_module_init(struct module *mod) 5069 { 5070 if (ftrace_disabled || !mod->num_ftrace_callsites) 5071 return; 5072 5073 ftrace_process_locs(mod, mod->ftrace_callsites, 5074 mod->ftrace_callsites + mod->num_ftrace_callsites); 5075 } 5076 #endif /* CONFIG_MODULES */ 5077 5078 void __init ftrace_init(void) 5079 { 5080 extern unsigned long __start_mcount_loc[]; 5081 extern unsigned long __stop_mcount_loc[]; 5082 unsigned long count, flags; 5083 int ret; 5084 5085 local_irq_save(flags); 5086 ret = ftrace_dyn_arch_init(); 5087 local_irq_restore(flags); 5088 if (ret) 5089 goto failed; 5090 5091 count = __stop_mcount_loc - __start_mcount_loc; 5092 if (!count) { 5093 pr_info("ftrace: No functions to be traced?\n"); 5094 goto failed; 5095 } 5096 5097 pr_info("ftrace: allocating %ld entries in %ld pages\n", 5098 count, count / ENTRIES_PER_PAGE + 1); 5099 5100 last_ftrace_enabled = ftrace_enabled = 1; 5101 5102 ret = ftrace_process_locs(NULL, 5103 __start_mcount_loc, 5104 __stop_mcount_loc); 5105 5106 set_ftrace_early_filters(); 5107 5108 return; 5109 failed: 5110 ftrace_disabled = 1; 5111 } 5112 5113 /* Do nothing if arch does not support this */ 5114 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) 5115 { 5116 } 5117 5118 static void ftrace_update_trampoline(struct ftrace_ops *ops) 5119 { 5120 5121 /* 5122 * Currently there's no safe way to free a trampoline when the kernel 5123 * is configured with PREEMPT. That is because a task could be preempted 5124 * when it jumped to the trampoline, it may be preempted for a long time 5125 * depending on the system load, and currently there's no way to know 5126 * when it will be off the trampoline. If the trampoline is freed 5127 * too early, when the task runs again, it will be executing on freed 5128 * memory and crash. 
5129 */ 5130 #ifdef CONFIG_PREEMPT 5131 /* Currently, only non dynamic ops can have a trampoline */ 5132 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) 5133 return; 5134 #endif 5135 5136 arch_ftrace_update_trampoline(ops); 5137 } 5138 5139 #else 5140 5141 static struct ftrace_ops global_ops = { 5142 .func = ftrace_stub, 5143 .flags = FTRACE_OPS_FL_RECURSION_SAFE | 5144 FTRACE_OPS_FL_INITIALIZED | 5145 FTRACE_OPS_FL_PID, 5146 }; 5147 5148 static int __init ftrace_nodyn_init(void) 5149 { 5150 ftrace_enabled = 1; 5151 return 0; 5152 } 5153 core_initcall(ftrace_nodyn_init); 5154 5155 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; } 5156 static inline void ftrace_startup_enable(int command) { } 5157 static inline void ftrace_startup_all(int command) { } 5158 /* Keep as macros so we do not need to define the commands */ 5159 # define ftrace_startup(ops, command) \ 5160 ({ \ 5161 int ___ret = __register_ftrace_function(ops); \ 5162 if (!___ret) \ 5163 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \ 5164 ___ret; \ 5165 }) 5166 # define ftrace_shutdown(ops, command) \ 5167 ({ \ 5168 int ___ret = __unregister_ftrace_function(ops); \ 5169 if (!___ret) \ 5170 (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \ 5171 ___ret; \ 5172 }) 5173 5174 # define ftrace_startup_sysctl() do { } while (0) 5175 # define ftrace_shutdown_sysctl() do { } while (0) 5176 5177 static inline int 5178 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) 5179 { 5180 return 1; 5181 } 5182 5183 static void ftrace_update_trampoline(struct ftrace_ops *ops) 5184 { 5185 } 5186 5187 #endif /* CONFIG_DYNAMIC_FTRACE */ 5188 5189 __init void ftrace_init_global_array_ops(struct trace_array *tr) 5190 { 5191 tr->ops = &global_ops; 5192 tr->ops->private = tr; 5193 } 5194 5195 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) 5196 { 5197 /* If we filter on pids, update to use the pid function */ 5198 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { 5199 if (WARN_ON(tr->ops->func != ftrace_stub)) 5200 printk("ftrace ops had %pS for function\n", 5201 tr->ops->func); 5202 } 5203 tr->ops->func = func; 5204 tr->ops->private = tr; 5205 } 5206 5207 void ftrace_reset_array_ops(struct trace_array *tr) 5208 { 5209 tr->ops->func = ftrace_stub; 5210 } 5211 5212 static inline void 5213 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 5214 struct ftrace_ops *ignored, struct pt_regs *regs) 5215 { 5216 struct ftrace_ops *op; 5217 int bit; 5218 5219 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); 5220 if (bit < 0) 5221 return; 5222 5223 /* 5224 * Some of the ops may be dynamically allocated, 5225 * they must be freed after a synchronize_sched(). 5226 */ 5227 preempt_disable_notrace(); 5228 5229 do_for_each_ftrace_op(op, ftrace_ops_list) { 5230 /* 5231 * Check the following for each ops before calling their func: 5232 * if RCU flag is set, then rcu_is_watching() must be true 5233 * if PER_CPU is set, then ftrace_function_local_disable() 5234 * must be false 5235 * Otherwise test if the ip matches the ops filter 5236 * 5237 * If any of the above fails then the op->func() is not executed. 
5238 */ 5239 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && 5240 (!(op->flags & FTRACE_OPS_FL_PER_CPU) || 5241 !ftrace_function_local_disabled(op)) && 5242 ftrace_ops_test(op, ip, regs)) { 5243 5244 if (FTRACE_WARN_ON(!op->func)) { 5245 pr_warn("op=%p %pS\n", op, op); 5246 goto out; 5247 } 5248 op->func(ip, parent_ip, op, regs); 5249 } 5250 } while_for_each_ftrace_op(op); 5251 out: 5252 preempt_enable_notrace(); 5253 trace_clear_recursion(bit); 5254 } 5255 5256 /* 5257 * Some archs only support passing ip and parent_ip. Even though 5258 * the list function ignores the op parameter, we do not want any 5259 * C side effects, where a function is called without the caller 5260 * sending a third parameter. 5261 * Archs are to support both the regs and ftrace_ops at the same time. 5262 * If they support ftrace_ops, it is assumed they support regs. 5263 * If call backs want to use regs, they must either check for regs 5264 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. 5265 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. 5266 * An architecture can pass partial regs with ftrace_ops and still 5267 * set the ARCH_SUPPORTS_FTRACE_OPS. 5268 */ 5269 #if ARCH_SUPPORTS_FTRACE_OPS 5270 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 5271 struct ftrace_ops *op, struct pt_regs *regs) 5272 { 5273 __ftrace_ops_list_func(ip, parent_ip, NULL, regs); 5274 } 5275 #else 5276 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) 5277 { 5278 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); 5279 } 5280 #endif 5281 5282 /* 5283 * If there's only one function registered but it does not support 5284 * recursion, needs RCU protection and/or requires per cpu handling, then 5285 * this function will be called by the mcount trampoline. 5286 */ 5287 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, 5288 struct ftrace_ops *op, struct pt_regs *regs) 5289 { 5290 int bit; 5291 5292 if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching()) 5293 return; 5294 5295 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); 5296 if (bit < 0) 5297 return; 5298 5299 preempt_disable_notrace(); 5300 5301 if (!(op->flags & FTRACE_OPS_FL_PER_CPU) || 5302 !ftrace_function_local_disabled(op)) { 5303 op->func(ip, parent_ip, op, regs); 5304 } 5305 5306 preempt_enable_notrace(); 5307 trace_clear_recursion(bit); 5308 } 5309 5310 /** 5311 * ftrace_ops_get_func - get the function a trampoline should call 5312 * @ops: the ops to get the function for 5313 * 5314 * Normally the mcount trampoline will call the ops->func, but there 5315 * are times that it should not. For example, if the ops does not 5316 * have its own recursion protection, then it should call the 5317 * ftrace_ops_recurs_func() instead. 5318 * 5319 * Returns the function that the trampoline should call for @ops. 5320 */ 5321 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) 5322 { 5323 /* 5324 * If the function does not handle recursion, needs to be RCU safe, 5325 * or does per cpu logic, then we need to call the assist handler. 
5326 */ 5327 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) || 5328 ops->flags & (FTRACE_OPS_FL_RCU | FTRACE_OPS_FL_PER_CPU)) 5329 return ftrace_ops_assist_func; 5330 5331 return ops->func; 5332 } 5333 5334 static void 5335 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt, 5336 struct task_struct *prev, struct task_struct *next) 5337 { 5338 struct trace_array *tr = data; 5339 struct trace_pid_list *pid_list; 5340 5341 pid_list = rcu_dereference_sched(tr->function_pids); 5342 5343 this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid, 5344 trace_ignore_this_task(pid_list, next)); 5345 } 5346 5347 static void clear_ftrace_pids(struct trace_array *tr) 5348 { 5349 struct trace_pid_list *pid_list; 5350 int cpu; 5351 5352 pid_list = rcu_dereference_protected(tr->function_pids, 5353 lockdep_is_held(&ftrace_lock)); 5354 if (!pid_list) 5355 return; 5356 5357 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); 5358 5359 for_each_possible_cpu(cpu) 5360 per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false; 5361 5362 rcu_assign_pointer(tr->function_pids, NULL); 5363 5364 /* Wait till all users are no longer using pid filtering */ 5365 synchronize_sched(); 5366 5367 trace_free_pid_list(pid_list); 5368 } 5369 5370 static void ftrace_pid_reset(struct trace_array *tr) 5371 { 5372 mutex_lock(&ftrace_lock); 5373 clear_ftrace_pids(tr); 5374 5375 ftrace_update_pid_func(); 5376 ftrace_startup_all(0); 5377 5378 mutex_unlock(&ftrace_lock); 5379 } 5380 5381 /* Greater than any max PID */ 5382 #define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1) 5383 5384 static void *fpid_start(struct seq_file *m, loff_t *pos) 5385 __acquires(RCU) 5386 { 5387 struct trace_pid_list *pid_list; 5388 struct trace_array *tr = m->private; 5389 5390 mutex_lock(&ftrace_lock); 5391 rcu_read_lock_sched(); 5392 5393 pid_list = rcu_dereference_sched(tr->function_pids); 5394 5395 if (!pid_list) 5396 return !(*pos) ? 
FTRACE_NO_PIDS : NULL; 5397 5398 return trace_pid_start(pid_list, pos); 5399 } 5400 5401 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) 5402 { 5403 struct trace_array *tr = m->private; 5404 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids); 5405 5406 if (v == FTRACE_NO_PIDS) 5407 return NULL; 5408 5409 return trace_pid_next(pid_list, v, pos); 5410 } 5411 5412 static void fpid_stop(struct seq_file *m, void *p) 5413 __releases(RCU) 5414 { 5415 rcu_read_unlock_sched(); 5416 mutex_unlock(&ftrace_lock); 5417 } 5418 5419 static int fpid_show(struct seq_file *m, void *v) 5420 { 5421 if (v == FTRACE_NO_PIDS) { 5422 seq_puts(m, "no pid\n"); 5423 return 0; 5424 } 5425 5426 return trace_pid_show(m, v); 5427 } 5428 5429 static const struct seq_operations ftrace_pid_sops = { 5430 .start = fpid_start, 5431 .next = fpid_next, 5432 .stop = fpid_stop, 5433 .show = fpid_show, 5434 }; 5435 5436 static int 5437 ftrace_pid_open(struct inode *inode, struct file *file) 5438 { 5439 struct trace_array *tr = inode->i_private; 5440 struct seq_file *m; 5441 int ret = 0; 5442 5443 if (trace_array_get(tr) < 0) 5444 return -ENODEV; 5445 5446 if ((file->f_mode & FMODE_WRITE) && 5447 (file->f_flags & O_TRUNC)) 5448 ftrace_pid_reset(tr); 5449 5450 ret = seq_open(file, &ftrace_pid_sops); 5451 if (ret < 0) { 5452 trace_array_put(tr); 5453 } else { 5454 m = file->private_data; 5455 /* copy tr over to seq ops */ 5456 m->private = tr; 5457 } 5458 5459 return ret; 5460 } 5461 5462 static void ignore_task_cpu(void *data) 5463 { 5464 struct trace_array *tr = data; 5465 struct trace_pid_list *pid_list; 5466 5467 /* 5468 * This function is called by on_each_cpu() while the 5469 * event_mutex is held. 5470 */ 5471 pid_list = rcu_dereference_protected(tr->function_pids, 5472 mutex_is_locked(&ftrace_lock)); 5473 5474 this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid, 5475 trace_ignore_this_task(pid_list, current)); 5476 } 5477 5478 static ssize_t 5479 ftrace_pid_write(struct file *filp, const char __user *ubuf, 5480 size_t cnt, loff_t *ppos) 5481 { 5482 struct seq_file *m = filp->private_data; 5483 struct trace_array *tr = m->private; 5484 struct trace_pid_list *filtered_pids = NULL; 5485 struct trace_pid_list *pid_list; 5486 ssize_t ret; 5487 5488 if (!cnt) 5489 return 0; 5490 5491 mutex_lock(&ftrace_lock); 5492 5493 filtered_pids = rcu_dereference_protected(tr->function_pids, 5494 lockdep_is_held(&ftrace_lock)); 5495 5496 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt); 5497 if (ret < 0) 5498 goto out; 5499 5500 rcu_assign_pointer(tr->function_pids, pid_list); 5501 5502 if (filtered_pids) { 5503 synchronize_sched(); 5504 trace_free_pid_list(filtered_pids); 5505 } else if (pid_list) { 5506 /* Register a probe to set whether to ignore the tracing of a task */ 5507 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); 5508 } 5509 5510 /* 5511 * Ignoring of pids is done at task switch. But we have to 5512 * check for those tasks that are currently running. 5513 * Always do this in case a pid was appended or removed. 
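 *
 * (Usage sketch: this handler is reached by writing pids to the
 * set_ftrace_pid file created below; opening that file for write with
 * O_TRUNC clears the list via ftrace_pid_reset() in ftrace_pid_open().)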
5514 */ 5515 on_each_cpu(ignore_task_cpu, tr, 1); 5516 5517 ftrace_update_pid_func(); 5518 ftrace_startup_all(0); 5519 out: 5520 mutex_unlock(&ftrace_lock); 5521 5522 if (ret > 0) 5523 *ppos += ret; 5524 5525 return ret; 5526 } 5527 5528 static int 5529 ftrace_pid_release(struct inode *inode, struct file *file) 5530 { 5531 struct trace_array *tr = inode->i_private; 5532 5533 trace_array_put(tr); 5534 5535 return seq_release(inode, file); 5536 } 5537 5538 static const struct file_operations ftrace_pid_fops = { 5539 .open = ftrace_pid_open, 5540 .write = ftrace_pid_write, 5541 .read = seq_read, 5542 .llseek = tracing_lseek, 5543 .release = ftrace_pid_release, 5544 }; 5545 5546 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer) 5547 { 5548 trace_create_file("set_ftrace_pid", 0644, d_tracer, 5549 tr, &ftrace_pid_fops); 5550 } 5551 5552 void __init ftrace_init_tracefs_toplevel(struct trace_array *tr, 5553 struct dentry *d_tracer) 5554 { 5555 /* Only the top level directory has the dyn_tracefs and profile */ 5556 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); 5557 5558 ftrace_init_dyn_tracefs(d_tracer); 5559 ftrace_profile_tracefs(d_tracer); 5560 } 5561 5562 /** 5563 * ftrace_kill - kill ftrace 5564 * 5565 * This function should be used by panic code. It stops ftrace 5566 * but in a not so nice way. If you need to simply kill ftrace 5567 * from a non-atomic section, use ftrace_kill. 5568 */ 5569 void ftrace_kill(void) 5570 { 5571 ftrace_disabled = 1; 5572 ftrace_enabled = 0; 5573 clear_ftrace_function(); 5574 } 5575 5576 /** 5577 * Test if ftrace is dead or not. 5578 */ 5579 int ftrace_is_dead(void) 5580 { 5581 return ftrace_disabled; 5582 } 5583 5584 /** 5585 * register_ftrace_function - register a function for profiling 5586 * @ops - ops structure that holds the function for profiling. 5587 * 5588 * Register a function to be called by all functions in the 5589 * kernel. 5590 * 5591 * Note: @ops->func and all the functions it calls must be labeled 5592 * with "notrace", otherwise it will go into a 5593 * recursive loop. 5594 */ 5595 int register_ftrace_function(struct ftrace_ops *ops) 5596 { 5597 int ret = -1; 5598 5599 ftrace_ops_init(ops); 5600 5601 mutex_lock(&ftrace_lock); 5602 5603 ret = ftrace_startup(ops, 0); 5604 5605 mutex_unlock(&ftrace_lock); 5606 5607 return ret; 5608 } 5609 EXPORT_SYMBOL_GPL(register_ftrace_function); 5610 5611 /** 5612 * unregister_ftrace_function - unregister a function for profiling. 5613 * @ops - ops structure that holds the function to unregister 5614 * 5615 * Unregister a function that was added to be called by ftrace profiling. 
5616 */ 5617 int unregister_ftrace_function(struct ftrace_ops *ops) 5618 { 5619 int ret; 5620 5621 mutex_lock(&ftrace_lock); 5622 ret = ftrace_shutdown(ops, 0); 5623 mutex_unlock(&ftrace_lock); 5624 5625 return ret; 5626 } 5627 EXPORT_SYMBOL_GPL(unregister_ftrace_function); 5628 5629 int 5630 ftrace_enable_sysctl(struct ctl_table *table, int write, 5631 void __user *buffer, size_t *lenp, 5632 loff_t *ppos) 5633 { 5634 int ret = -ENODEV; 5635 5636 mutex_lock(&ftrace_lock); 5637 5638 if (unlikely(ftrace_disabled)) 5639 goto out; 5640 5641 ret = proc_dointvec(table, write, buffer, lenp, ppos); 5642 5643 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) 5644 goto out; 5645 5646 last_ftrace_enabled = !!ftrace_enabled; 5647 5648 if (ftrace_enabled) { 5649 5650 /* we are starting ftrace again */ 5651 if (ftrace_ops_list != &ftrace_list_end) 5652 update_ftrace_function(); 5653 5654 ftrace_startup_sysctl(); 5655 5656 } else { 5657 /* stopping ftrace calls (just send to ftrace_stub) */ 5658 ftrace_trace_function = ftrace_stub; 5659 5660 ftrace_shutdown_sysctl(); 5661 } 5662 5663 out: 5664 mutex_unlock(&ftrace_lock); 5665 return ret; 5666 } 5667 5668 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 5669 5670 static struct ftrace_ops graph_ops = { 5671 .func = ftrace_stub, 5672 .flags = FTRACE_OPS_FL_RECURSION_SAFE | 5673 FTRACE_OPS_FL_INITIALIZED | 5674 FTRACE_OPS_FL_PID | 5675 FTRACE_OPS_FL_STUB, 5676 #ifdef FTRACE_GRAPH_TRAMP_ADDR 5677 .trampoline = FTRACE_GRAPH_TRAMP_ADDR, 5678 /* trampoline_size is only needed for dynamically allocated tramps */ 5679 #endif 5680 ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) 5681 }; 5682 5683 void ftrace_graph_sleep_time_control(bool enable) 5684 { 5685 fgraph_sleep_time = enable; 5686 } 5687 5688 void ftrace_graph_graph_time_control(bool enable) 5689 { 5690 fgraph_graph_time = enable; 5691 } 5692 5693 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) 5694 { 5695 return 0; 5696 } 5697 5698 /* The callbacks that hook a function */ 5699 trace_func_graph_ret_t ftrace_graph_return = 5700 (trace_func_graph_ret_t)ftrace_stub; 5701 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub; 5702 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub; 5703 5704 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. 
*/
5705 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
5706 {
5707 int i;
5708 int ret = 0;
5709 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
5710 struct task_struct *g, *t;
5711
5712 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
5713 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
5714 * sizeof(struct ftrace_ret_stack),
5715 GFP_KERNEL);
5716 if (!ret_stack_list[i]) {
5717 start = 0;
5718 end = i;
5719 ret = -ENOMEM;
5720 goto free;
5721 }
5722 }
5723
5724 read_lock(&tasklist_lock);
5725 do_each_thread(g, t) {
5726 if (start == end) {
5727 ret = -EAGAIN;
5728 goto unlock;
5729 }
5730
5731 if (t->ret_stack == NULL) {
5732 atomic_set(&t->tracing_graph_pause, 0);
5733 atomic_set(&t->trace_overrun, 0);
5734 t->curr_ret_stack = -1;
5735 /* Make sure the tasks see the -1 first: */
5736 smp_wmb();
5737 t->ret_stack = ret_stack_list[start++];
5738 }
5739 } while_each_thread(g, t);
5740
5741 unlock:
5742 read_unlock(&tasklist_lock);
5743 free:
5744 for (i = start; i < end; i++)
5745 kfree(ret_stack_list[i]);
5746 return ret;
5747 }
5748
5749 static void
5750 ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
5751 struct task_struct *prev, struct task_struct *next)
5752 {
5753 unsigned long long timestamp;
5754 int index;
5755
5756 /*
5757 * Does the user want to count the time a function was asleep?
5758 * If so, do not update the time stamps.
5759 */
5760 if (fgraph_sleep_time)
5761 return;
5762
5763 timestamp = trace_clock_local();
5764
5765 prev->ftrace_timestamp = timestamp;
5766
5767 /* only process tasks that we timestamped */
5768 if (!next->ftrace_timestamp)
5769 return;
5770
5771 /*
5772 * Update all the counters in next to make up for the
5773 * time next was sleeping.
5774 */
5775 timestamp -= next->ftrace_timestamp;
5776
5777 for (index = next->curr_ret_stack; index >= 0; index--)
5778 next->ret_stack[index].calltime += timestamp;
5779 }
5780
5781 /* Allocate a return stack for each task */
5782 static int start_graph_tracing(void)
5783 {
5784 struct ftrace_ret_stack **ret_stack_list;
5785 int ret, cpu;
5786
5787 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
5788 sizeof(struct ftrace_ret_stack *),
5789 GFP_KERNEL);
5790
5791 if (!ret_stack_list)
5792 return -ENOMEM;
5793
5794 /* The cpu_boot init_task->ret_stack will never be freed */
5795 for_each_online_cpu(cpu) {
5796 if (!idle_task(cpu)->ret_stack)
5797 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
5798 }
5799
5800 do {
5801 ret = alloc_retstack_tasklist(ret_stack_list);
5802 } while (ret == -EAGAIN);
5803
5804 if (!ret) {
5805 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5806 if (ret)
5807 pr_info("ftrace_graph: Couldn't activate tracepoint"
5808 " probe to kernel_sched_switch\n");
5809 }
5810
5811 kfree(ret_stack_list);
5812 return ret;
5813 }
5814
5815 /*
5816 * Hibernation protection.
5817 * The state of the current task is too unstable during
5818 * suspend/restore to disk. We want to protect against that.
5819 */ 5820 static int 5821 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, 5822 void *unused) 5823 { 5824 switch (state) { 5825 case PM_HIBERNATION_PREPARE: 5826 pause_graph_tracing(); 5827 break; 5828 5829 case PM_POST_HIBERNATION: 5830 unpause_graph_tracing(); 5831 break; 5832 } 5833 return NOTIFY_DONE; 5834 } 5835 5836 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace) 5837 { 5838 if (!ftrace_ops_test(&global_ops, trace->func, NULL)) 5839 return 0; 5840 return __ftrace_graph_entry(trace); 5841 } 5842 5843 /* 5844 * The function graph tracer should only trace the functions defined 5845 * by set_ftrace_filter and set_ftrace_notrace. If another function 5846 * tracer ops is registered, the graph tracer requires testing the 5847 * function against the global ops, and not just trace any function 5848 * that any ftrace_ops registered. 5849 */ 5850 static void update_function_graph_func(void) 5851 { 5852 struct ftrace_ops *op; 5853 bool do_test = false; 5854 5855 /* 5856 * The graph and global ops share the same set of functions 5857 * to test. If any other ops is on the list, then 5858 * the graph tracing needs to test if its the function 5859 * it should call. 5860 */ 5861 do_for_each_ftrace_op(op, ftrace_ops_list) { 5862 if (op != &global_ops && op != &graph_ops && 5863 op != &ftrace_list_end) { 5864 do_test = true; 5865 /* in double loop, break out with goto */ 5866 goto out; 5867 } 5868 } while_for_each_ftrace_op(op); 5869 out: 5870 if (do_test) 5871 ftrace_graph_entry = ftrace_graph_entry_test; 5872 else 5873 ftrace_graph_entry = __ftrace_graph_entry; 5874 } 5875 5876 static struct notifier_block ftrace_suspend_notifier = { 5877 .notifier_call = ftrace_suspend_notifier_call, 5878 }; 5879 5880 int register_ftrace_graph(trace_func_graph_ret_t retfunc, 5881 trace_func_graph_ent_t entryfunc) 5882 { 5883 int ret = 0; 5884 5885 mutex_lock(&ftrace_lock); 5886 5887 /* we currently allow only one tracer registered at a time */ 5888 if (ftrace_graph_active) { 5889 ret = -EBUSY; 5890 goto out; 5891 } 5892 5893 register_pm_notifier(&ftrace_suspend_notifier); 5894 5895 ftrace_graph_active++; 5896 ret = start_graph_tracing(); 5897 if (ret) { 5898 ftrace_graph_active--; 5899 goto out; 5900 } 5901 5902 ftrace_graph_return = retfunc; 5903 5904 /* 5905 * Update the indirect function to the entryfunc, and the 5906 * function that gets called to the entry_test first. Then 5907 * call the update fgraph entry function to determine if 5908 * the entryfunc should be called directly or not. 5909 */ 5910 __ftrace_graph_entry = entryfunc; 5911 ftrace_graph_entry = ftrace_graph_entry_test; 5912 update_function_graph_func(); 5913 5914 ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET); 5915 out: 5916 mutex_unlock(&ftrace_lock); 5917 return ret; 5918 } 5919 5920 void unregister_ftrace_graph(void) 5921 { 5922 mutex_lock(&ftrace_lock); 5923 5924 if (unlikely(!ftrace_graph_active)) 5925 goto out; 5926 5927 ftrace_graph_active--; 5928 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; 5929 ftrace_graph_entry = ftrace_graph_entry_stub; 5930 __ftrace_graph_entry = ftrace_graph_entry_stub; 5931 ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET); 5932 unregister_pm_notifier(&ftrace_suspend_notifier); 5933 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); 5934 5935 #ifdef CONFIG_DYNAMIC_FTRACE 5936 /* 5937 * Function graph does not allocate the trampoline, but 5938 * other global_ops do. We need to reset the ALLOC_TRAMP flag 5939 * if one was used. 
5940 */ 5941 global_ops.trampoline = save_global_trampoline; 5942 if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP) 5943 global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP; 5944 #endif 5945 5946 out: 5947 mutex_unlock(&ftrace_lock); 5948 } 5949 5950 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); 5951 5952 static void 5953 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) 5954 { 5955 atomic_set(&t->tracing_graph_pause, 0); 5956 atomic_set(&t->trace_overrun, 0); 5957 t->ftrace_timestamp = 0; 5958 /* make curr_ret_stack visible before we add the ret_stack */ 5959 smp_wmb(); 5960 t->ret_stack = ret_stack; 5961 } 5962 5963 /* 5964 * Allocate a return stack for the idle task. May be the first 5965 * time through, or it may be done by CPU hotplug online. 5966 */ 5967 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) 5968 { 5969 t->curr_ret_stack = -1; 5970 /* 5971 * The idle task has no parent, it either has its own 5972 * stack or no stack at all. 5973 */ 5974 if (t->ret_stack) 5975 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu)); 5976 5977 if (ftrace_graph_active) { 5978 struct ftrace_ret_stack *ret_stack; 5979 5980 ret_stack = per_cpu(idle_ret_stack, cpu); 5981 if (!ret_stack) { 5982 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH 5983 * sizeof(struct ftrace_ret_stack), 5984 GFP_KERNEL); 5985 if (!ret_stack) 5986 return; 5987 per_cpu(idle_ret_stack, cpu) = ret_stack; 5988 } 5989 graph_init_task(t, ret_stack); 5990 } 5991 } 5992 5993 /* Allocate a return stack for newly created task */ 5994 void ftrace_graph_init_task(struct task_struct *t) 5995 { 5996 /* Make sure we do not use the parent ret_stack */ 5997 t->ret_stack = NULL; 5998 t->curr_ret_stack = -1; 5999 6000 if (ftrace_graph_active) { 6001 struct ftrace_ret_stack *ret_stack; 6002 6003 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH 6004 * sizeof(struct ftrace_ret_stack), 6005 GFP_KERNEL); 6006 if (!ret_stack) 6007 return; 6008 graph_init_task(t, ret_stack); 6009 } 6010 } 6011 6012 void ftrace_graph_exit_task(struct task_struct *t) 6013 { 6014 struct ftrace_ret_stack *ret_stack = t->ret_stack; 6015 6016 t->ret_stack = NULL; 6017 /* NULL must become visible to IRQs before we free it: */ 6018 barrier(); 6019 6020 kfree(ret_stack); 6021 } 6022 #endif 6023