// SPDX-License-Identifier: GPL-2.0
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RETADDR_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds trace_buf_size entries; if we
		 * loop more times than that, something is wrong with
		 * the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
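
/*
 * The selftests below use trace_test_buffer() in a common pattern:
 * stop tracing, validate (and count) the buffer entries, then restart
 * tracing.  A sketch of that call sequence (see the individual tests
 * for the full error handling):
 *
 *	tracing_stop();
 *	ret = trace_test_buffer(&tr->array_buffer, &count);
 *	tracing_start();
 *	if (!ret && !count)
 *		ret = -1;	/* tracer produced no entries */
 */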

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct ftrace_regs *fregs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}
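
/*
 * Exercise filtering with several ftrace_ops at once.  Probe 1 filters
 * on DYN_FTRACE_TEST_NAME, probe 2 on DYN_FTRACE_TEST_NAME2, and probe 3
 * on both; a fourth, dynamically allocated ops traces everything.  The
 * two test functions are then called and every counter is checked after
 * each step.  When cnt > 1 the test also registers tr->ops, to cover the
 * case where global function tracing is active at the same time.
 */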
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* First time we are running with main function */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	/* Remove trace function from probe 3 */
	func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
	len1 = strlen(func1_name);

	ftrace_set_filter(&test_probe3, func1_name, len1, 0);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 5)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}
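
/*
 * The next two tests exercise ftrace's recursion handling.  The first
 * callback is registered with FTRACE_OPS_FL_RECURSION and calls back
 * into the traced function, so the ftrace infrastructure itself must
 * stop the callback from recursing without bound.  The second callback
 * provides its own recursion protection, and the test checks that its
 * counter ends up at exactly 2.
 */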
static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct ftrace_regs *fregs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct ftrace_regs *fregs)
{
	/*
	 * We said we would provide our own recursion. By calling
	 * this function again, we should recurse back into this function
	 * and count again. But this only happens if the arch supports
	 * all of ftrace features and nothing else is using the function
	 * tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
	.flags = FTRACE_OPS_FL_RECURSION,
};

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
};

static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	/*
	 * Recursion allows for transitions between contexts,
	 * and may call the callback twice.
	 */
	if (trace_selftest_recursion_cnt != 1 &&
	    trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called once (or twice) (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func		= trace_selftest_test_regs_func,
	.flags		= FTRACE_OPS_FL_SAVE_REGS,
};

static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
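
/*
 * trace_selftest_startup_function() below is the entry point for the
 * function tracer selftest.  Besides the simple buffer check described
 * in its own comment, it chains the dynamic tracing, recursion and regs
 * tests above, and calls ftrace_kill() to disable ftrace completely if
 * anything failed.
 */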
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define CHAR_NUMBER		123
#define SHORT_NUMBER		12345
#define WORD_NUMBER		1234567890
#define LONG_NUMBER		1234567890123456789LL
#define ERRSTR_BUFLEN		128

struct fgraph_fixture {
	struct fgraph_ops gops;
	int store_size;
	const char *store_type_name;
	char error_str_buf[ERRSTR_BUFLEN];
	char *error_str;
};

static __init int store_entry(struct ftrace_graph_ent *trace,
			      struct fgraph_ops *gops)
{
	struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
	const char *type = fixture->store_type_name;
	int size = fixture->store_size;
	void *p;

	p = fgraph_reserve_data(gops->idx, size);
	if (!p) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "Failed to reserve %s\n", type);
		return 0;
	}

	switch (size) {
	case 1:
		*(char *)p = CHAR_NUMBER;
		break;
	case 2:
		*(short *)p = SHORT_NUMBER;
		break;
	case 4:
		*(int *)p = WORD_NUMBER;
		break;
	case 8:
		*(long long *)p = LONG_NUMBER;
		break;
	}

	return 1;
}

static __init void store_return(struct ftrace_graph_ret *trace,
				struct fgraph_ops *gops)
{
	struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
	const char *type = fixture->store_type_name;
	long long expect = 0;
	long long found = -1;
	int size;
	char *p;

	p = fgraph_retrieve_data(gops->idx, &size);
	if (!p) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "Failed to retrieve %s\n", type);
		return;
	}
	if (fixture->store_size > size) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "Retrieved size %d is smaller than expected %d\n",
			 size, (int)fixture->store_size);
		return;
	}

	switch (fixture->store_size) {
	case 1:
		expect = CHAR_NUMBER;
		found = *(char *)p;
		break;
	case 2:
		expect = SHORT_NUMBER;
		found = *(short *)p;
		break;
	case 4:
		expect = WORD_NUMBER;
		found = *(int *)p;
		break;
	case 8:
		expect = LONG_NUMBER;
		found = *(long long *)p;
		break;
	}

	if (found != expect) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "%s returned not %lld but %lld\n", type, expect, found);
		return;
	}
	fixture->error_str = NULL;
}

static int __init init_fgraph_fixture(struct fgraph_fixture *fixture)
{
	char *func_name;
	int len;

	snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
		 "Failed to execute storage %s\n", fixture->store_type_name);
	fixture->error_str = fixture->error_str_buf;

	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	return ftrace_set_filter(&fixture->gops.ops, func_name, len, 1);
}
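
/*
 * Each fixture pairs store_entry() and store_return(): the entry handler
 * reserves store_size bytes of per-call fgraph data and writes a known
 * value, and the return handler reads the data back and checks that the
 * same value comes out.  A fixture whose error_str is still set after a
 * run has failed the round trip.
 */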
/* Test fgraph storage for each size */
static int __init test_graph_storage_single(struct fgraph_fixture *fixture)
{
	int size = fixture->store_size;
	int ret;

	pr_cont("PASSED\n");
	pr_info("Testing fgraph storage of %d byte%s: ", size, str_plural(size));

	ret = init_fgraph_fixture(fixture);
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		return -1;
	}

	ret = register_ftrace_graph(&fixture->gops);
	if (ret) {
		pr_warn("Failed to init store_bytes fgraph tracing\n");
		return -1;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_graph(&fixture->gops);

	if (fixture->error_str) {
		pr_cont("*** %s ***", fixture->error_str);
		return -1;
	}

	return 0;
}

static struct fgraph_fixture store_bytes[4] __initdata = {
	[0] = {
		.gops = {
			.entryfunc = store_entry,
			.retfunc = store_return,
		},
		.store_size = 1,
		.store_type_name = "byte",
	},
	[1] = {
		.gops = {
			.entryfunc = store_entry,
			.retfunc = store_return,
		},
		.store_size = 2,
		.store_type_name = "short",
	},
	[2] = {
		.gops = {
			.entryfunc = store_entry,
			.retfunc = store_return,
		},
		.store_size = 4,
		.store_type_name = "word",
	},
	[3] = {
		.gops = {
			.entryfunc = store_entry,
			.retfunc = store_return,
		},
		.store_size = 8,
		.store_type_name = "long long",
	},
};

static __init int test_graph_storage_multi(void)
{
	struct fgraph_fixture *fixture;
	bool printed = false;
	int i, j, ret;

	pr_cont("PASSED\n");
	pr_info("Testing multiple fgraph storage on a function: ");

	for (i = 0; i < ARRAY_SIZE(store_bytes); i++) {
		fixture = &store_bytes[i];
		ret = init_fgraph_fixture(fixture);
		if (ret && ret != -ENODEV) {
			pr_cont("*Could not set filter* ");
			printed = true;
			goto out2;
		}
	}

	for (j = 0; j < ARRAY_SIZE(store_bytes); j++) {
		fixture = &store_bytes[j];
		ret = register_ftrace_graph(&fixture->gops);
		if (ret) {
			pr_warn("Failed to init store_bytes fgraph tracing\n");
			printed = true;
			goto out1;
		}
	}

	DYN_FTRACE_TEST_NAME();
 out1:
	while (--j >= 0) {
		fixture = &store_bytes[j];
		unregister_ftrace_graph(&fixture->gops);

		if (fixture->error_str && !printed) {
			pr_cont("*** %s ***", fixture->error_str);
			printed = true;
		}
	}
 out2:
	while (--i >= 0) {
		fixture = &store_bytes[i];
		ftrace_free_filter(&fixture->gops.ops);

		if (fixture->error_str && !printed) {
			pr_cont("*** %s ***", fixture->error_str);
			printed = true;
		}
	}
	return printed ? -1 : 0;
}

/* Test the storage passed across function_graph entry and return */
static __init int test_graph_storage(void)
{
	int ret;

	ret = test_graph_storage_single(&store_bytes[0]);
	if (ret)
		return ret;
	ret = test_graph_storage_single(&store_bytes[1]);
	if (ret)
		return ret;
	ret = test_graph_storage_single(&store_bytes[2]);
	if (ret)
		return ret;
	ret = test_graph_storage_single(&store_bytes[3]);
	if (ret)
		return ret;
	ret = test_graph_storage_multi();
	if (ret)
		return ret;
	return 0;
}
#else
static inline int test_graph_storage(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace,
				      struct fgraph_ops *gops)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops_enabled()) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace, gops);
}

static struct fgraph_ops fgraph_ops __initdata = {
	.entryfunc		= &trace_graph_entry_watchdog,
	.retfunc		= &trace_graph_return,
};

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
static struct ftrace_ops direct;
#endif

/*
 * Pretty much the same as for the function tracer from which the selftest
 * has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;
	char *func_name __maybe_unused;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->array_buffer);
	fgraph_ops.private = tr;
	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		disable_tracing_selftest("recovering from a hang");
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	/* Need to also simulate the tr->reset to remove this fgraph_ops */
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(&fgraph_ops);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	/*
	 * These tests can take some time to run. Make sure that on
	 * non-PREEMPT kernels we do not trigger the softlockup detector.
	 */
	cond_resched();

	tracing_reset_online_cpus(&tr->array_buffer);
	fgraph_ops.private = tr;

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/*
	 * Register direct function together with graph tracer
	 * and make sure we get graph trace.
	 */
	ftrace_set_filter_ip(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, 0, 0);
	ret = register_ftrace_direct(&direct,
				     (unsigned long)ftrace_stub_direct_tramp);
	if (ret)
		goto out;

	cond_resched();

	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	count = 0;

	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	unregister_ftrace_graph(&fgraph_ops);

	ret = unregister_ftrace_direct(&direct,
				       (unsigned long)ftrace_stub_direct_tramp,
				       true);
	if (ret)
		goto out;

	cond_resched();

	tracing_start();

	if (!ret && !count) {
		ret = -1;
		goto out;
	}

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);
#endif

	ret = test_graph_storage();

	/* Don't test dynamic tracing, the function tracer already did */
 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
	struct completion	is_ready;
	int			go;
};

static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Let the test know we now run with the new policy */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
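
/*
 * The wakeup selftest below creates a -deadline kthread, waits for it
 * to go to sleep, and then wakes it while the tracer is active.  The
 * resulting wakeup should land in the max_buffer, which is checked for
 * sane, non-empty contents before the thread is stopped.
 */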
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;

	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (task_is_runnable(p)) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&data.is_ready);

	data.go = 1;
	/* memory barrier is in the wake_up_process() */
	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */