/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
	case TRACE_KSYM:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is a size of trace_buf_size, if
		 * we loop more than the size, there's something wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to fool gcc from optimizing */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case, is that we disable tracing. Honestly, if this
	 * race does happen something is horribly wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

#ifdef CONFIG_KSYM_TRACER
static int ksym_selftest_dummy;

int
trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	ksym_selftest_dummy = 0;
	/* Register the read-write tracing request */

	ret = process_new_ksym_entry("ksym_selftest_dummy",
				     HW_BREAKPOINT_R | HW_BREAKPOINT_W,
				     (unsigned long)(&ksym_selftest_dummy));

	if (ret < 0) {
		printk(KERN_CONT "ksym_trace read-write startup test failed\n");
		goto ret_path;
	}
	/* Perform a read and a write operation over the dummy variable to
	 * trigger the tracer
	 */
	if (ksym_selftest_dummy == 0)
		ksym_selftest_dummy++;

	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* read & write operations - one each is performed on the dummy variable
	 * triggering two entries in the trace buffer
	 */
	if (!ret && count != 2) {
		printk(KERN_CONT "Ksym tracer startup test failed");
		ret = -1;
	}

 ret_path:
	return ret;
}
#endif /* CONFIG_KSYM_TRACER */
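
The startup tests above are not called directly: a tracer opts in by pointing the .selftest member of its struct tracer at the matching routine, and register_tracer() runs that callback once at boot when the startup tests are configured in. The following is a minimal, illustrative sketch of that wiring; the my_tracer_* names are placeholders invented for this example, not code from the kernel tree, and the in-tree tracers (e.g. the function tracer in trace_functions.c) carry additional callbacks that are omitted here.

/*
 * Illustrative sketch only -- not part of trace_selftest.c.
 * Shows how a tracer of this kernel generation hooks one of the
 * startup selftests via the .selftest callback of struct tracer.
 * The my_tracer_* callbacks are placeholders for this example.
 */
static int my_tracer_init(struct trace_array *tr)
{
	/* a real tracer would register its probes here */
	return 0;
}

static void my_tracer_reset(struct trace_array *tr)
{
	/* a real tracer would unregister its probes here */
}

static struct tracer my_tracer __read_mostly =
{
	.name		= "my_tracer",
	.init		= my_tracer_init,
	.reset		= my_tracer_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	/* run the function-tracer style startup check at boot */
	.selftest	= trace_selftest_startup_function,
#endif
};

static __init int init_my_tracer(void)
{
	return register_tracer(&my_tracer);
}
device_initcall(init_my_tracer);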