1# SPDX-License-Identifier: GPL-2.0-only 2# 3# Architectures that offer an FUNCTION_TRACER implementation should 4# select HAVE_FUNCTION_TRACER: 5# 6 7config USER_STACKTRACE_SUPPORT 8 bool 9 10config NOP_TRACER 11 bool 12 13config HAVE_RETHOOK 14 bool 15 16config RETHOOK 17 bool 18 depends on HAVE_RETHOOK 19 help 20 Enable generic return hooking feature. This is an internal 21 API, which will be used by other function-entry hooking 22 features like fprobe and kprobes. 23 24config HAVE_FUNCTION_TRACER 25 bool 26 help 27 See Documentation/trace/ftrace-design.rst 28 29config HAVE_FUNCTION_GRAPH_TRACER 30 bool 31 help 32 See Documentation/trace/ftrace-design.rst 33 34config HAVE_FUNCTION_GRAPH_FREGS 35 bool 36 37config HAVE_FTRACE_GRAPH_FUNC 38 bool 39 help 40 True if ftrace_graph_func() is defined. 41 42config HAVE_DYNAMIC_FTRACE 43 bool 44 help 45 See Documentation/trace/ftrace-design.rst 46 47config HAVE_DYNAMIC_FTRACE_WITH_REGS 48 bool 49 50config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 51 bool 52 53config HAVE_SINGLE_FTRACE_DIRECT_OPS 54 bool 55 56config HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS 57 bool 58 59config HAVE_EXTRA_IPI_TRACEPOINTS 60 bool 61 help 62 For architectures that use ipi_raise, ipi_entry and ipi_exit 63 tracepoints. 64 65config HAVE_DYNAMIC_FTRACE_WITH_ARGS 66 bool 67 help 68 If this is set, then arguments and stack can be found from 69 the ftrace_regs passed into the function callback regs parameter 70 by default, even without setting the REGS flag in the ftrace_ops. 71 This allows for use of ftrace_regs_get_argument() and 72 ftrace_regs_get_stack_pointer(). 73 74config HAVE_FTRACE_REGS_HAVING_PT_REGS 75 bool 76 help 77 If this is set, ftrace_regs has pt_regs, thus it can convert to 78 pt_regs without allocating memory. 79 80config HAVE_DYNAMIC_FTRACE_NO_PATCHABLE 81 bool 82 help 83 If the architecture generates __patchable_function_entries sections 84 but does not want them included in the ftrace locations. 85 86config HAVE_DYNAMIC_FTRACE_WITH_JMP 87 bool 88 help 89 If the architecture supports to replace the __fentry__ with a 90 "jmp" instruction. 91 92config HAVE_SYSCALL_TRACEPOINTS 93 bool 94 help 95 See Documentation/trace/ftrace-design.rst 96 97config HAVE_FENTRY 98 bool 99 help 100 Arch supports the gcc options -pg with -mfentry 101 102config HAVE_NOP_MCOUNT 103 bool 104 help 105 Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount 106 107config HAVE_OBJTOOL_MCOUNT 108 bool 109 help 110 Arch supports objtool --mcount 111 112config HAVE_OBJTOOL_NOP_MCOUNT 113 bool 114 help 115 Arch supports the objtool options --mcount with --mnop. 116 An architecture can select this if it wants to enable nop'ing 117 of ftrace locations. 118 119config HAVE_C_RECORDMCOUNT 120 bool 121 help 122 C version of recordmcount available? 123 124config HAVE_BUILDTIME_MCOUNT_SORT 125 bool 126 help 127 An architecture selects this if it sorts the mcount_loc section 128 at build time. 129 130config BUILDTIME_MCOUNT_SORT 131 bool 132 default y 133 depends on HAVE_BUILDTIME_MCOUNT_SORT && DYNAMIC_FTRACE 134 help 135 Sort the mcount_loc section at build time. 136 137config TRACER_MAX_TRACE 138 bool 139 select TRACER_SNAPSHOT 140 141config TRACE_CLOCK 142 bool 143 144config RING_BUFFER 145 bool 146 select TRACE_CLOCK 147 select IRQ_WORK 148 149config EVENT_TRACING 150 select CONTEXT_SWITCH_TRACER 151 select GLOB 152 bool 153 154config CONTEXT_SWITCH_TRACER 155 bool 156 157config RING_BUFFER_ALLOW_SWAP 158 bool 159 help 160 Allow the use of ring_buffer_swap_cpu. 161 Adds a very slight overhead to tracing when enabled. 162 163config PREEMPTIRQ_TRACEPOINTS 164 bool 165 depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS 166 select TRACING 167 default y 168 help 169 Create preempt/irq toggle tracepoints if needed, so that other parts 170 of the kernel can use them to generate or add hooks to them. 171 172# All tracer options should select GENERIC_TRACER. For those options that are 173# enabled by all tracers (context switch and event tracer) they select TRACING. 174# This allows those options to appear when no other tracer is selected. But the 175# options do not appear when something else selects it. We need the two options 176# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the 177# hiding of the automatic options. 178 179config TRACING 180 bool 181 select RING_BUFFER 182 select STACKTRACE if STACKTRACE_SUPPORT 183 select TRACEPOINTS 184 select NOP_TRACER 185 select BINARY_PRINTF 186 select EVENT_TRACING 187 select TRACE_CLOCK 188 select NEED_TASKS_RCU 189 190config GENERIC_TRACER 191 bool 192 select TRACING 193 194# 195# Minimum requirements an architecture has to meet for us to 196# be able to offer generic tracing facilities: 197# 198config TRACING_SUPPORT 199 bool 200 depends on TRACE_IRQFLAGS_SUPPORT 201 depends on STACKTRACE_SUPPORT 202 default y 203 204menuconfig FTRACE 205 bool "Tracers" 206 depends on TRACING_SUPPORT 207 default y if DEBUG_KERNEL 208 help 209 Enable the kernel tracing infrastructure. 210 211if FTRACE 212 213config TRACEFS_AUTOMOUNT_DEPRECATED 214 bool "Automount tracefs on debugfs [DEPRECATED]" 215 depends on TRACING 216 default y 217 help 218 The tracing interface was moved from /sys/kernel/debug/tracing 219 to /sys/kernel/tracing in 2015, but the tracing file system 220 was still automounted in /sys/kernel/debug for backward 221 compatibility with tooling. 222 223 The new interface has been around for more than 10 years and 224 the old debug mount will soon be removed. 225 226config BOOTTIME_TRACING 227 bool "Boot-time Tracing support" 228 depends on TRACING 229 select BOOT_CONFIG 230 help 231 Enable developer to setup ftrace subsystem via supplemental 232 kernel cmdline at boot time for debugging (tracing) driver 233 initialization and boot process. 234 235config FUNCTION_TRACER 236 bool "Kernel Function Tracer" 237 depends on HAVE_FUNCTION_TRACER 238 select KALLSYMS 239 select GENERIC_TRACER 240 select CONTEXT_SWITCH_TRACER 241 select GLOB 242 select NEED_TASKS_RCU 243 select TASKS_RUDE_RCU 244 help 245 Enable the kernel to trace every kernel function. This is done 246 by using a compiler feature to insert a small, 5-byte No-Operation 247 instruction at the beginning of every kernel function, which NOP 248 sequence is then dynamically patched into a tracer call when 249 tracing is enabled by the administrator. If it's runtime disabled 250 (the bootup default), then the overhead of the instructions is very 251 small and not measurable even in micro-benchmarks (at least on 252 x86, but may have impact on other architectures). 253 254config FUNCTION_GRAPH_TRACER 255 bool "Kernel Function Graph Tracer" 256 depends on HAVE_FUNCTION_GRAPH_TRACER 257 depends on FUNCTION_TRACER 258 depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE 259 default y 260 help 261 Enable the kernel to trace a function at both its return 262 and its entry. 263 Its first purpose is to trace the duration of functions and 264 draw a call graph for each thread with some information like 265 the return value. This is done by setting the current return 266 address on the current task structure into a stack of calls. 267 268config FUNCTION_GRAPH_RETVAL 269 bool "Kernel Function Graph Return Value" 270 depends on HAVE_FUNCTION_GRAPH_FREGS 271 depends on FUNCTION_GRAPH_TRACER 272 default n 273 help 274 Support recording and printing the function return value when 275 using function graph tracer. It can be helpful to locate functions 276 that return errors. This feature is off by default, and you can 277 enable it via the trace option funcgraph-retval. 278 See Documentation/trace/ftrace.rst 279 280config FUNCTION_GRAPH_RETADDR 281 bool "Kernel Function Graph Return Address" 282 depends on FUNCTION_GRAPH_TRACER 283 default n 284 help 285 Support recording and printing the function return address when 286 using function graph tracer. It can be helpful to locate code line that 287 the function is called. This feature is off by default, and you can 288 enable it via the trace option funcgraph-retaddr. 289 290config FUNCTION_TRACE_ARGS 291 bool 292 depends on PROBE_EVENTS_BTF_ARGS 293 default y 294 help 295 If supported with function argument access API and BTF, then 296 the function tracer and function graph tracer will support printing 297 of function arguments. This feature is off by default, and can be 298 enabled via the trace option func-args (for the function tracer) and 299 funcgraph-args (for the function graph tracer) 300 301config DYNAMIC_FTRACE 302 bool 303 depends on FUNCTION_TRACER 304 depends on HAVE_DYNAMIC_FTRACE 305 default y 306 help 307 This option will modify all the calls to function tracing 308 dynamically (will patch them out of the binary image and 309 replace them with a No-Op instruction) on boot up. During 310 compile time, a table is made of all the locations that ftrace 311 can function trace, and this table is linked into the kernel 312 image. When this is enabled, functions can be individually 313 enabled, and the functions not enabled will not affect 314 performance of the system. 315 316 See the files in /sys/kernel/tracing: 317 available_filter_functions 318 set_ftrace_filter 319 set_ftrace_notrace 320 321 This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but 322 otherwise has native performance as long as no tracing is active. 323 324config DYNAMIC_FTRACE_WITH_REGS 325 def_bool y 326 depends on DYNAMIC_FTRACE 327 depends on HAVE_DYNAMIC_FTRACE_WITH_REGS 328 329config DYNAMIC_FTRACE_WITH_DIRECT_CALLS 330 def_bool y 331 depends on DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS 332 depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 333 334config DYNAMIC_FTRACE_WITH_CALL_OPS 335 def_bool y 336 depends on HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS 337 338config DYNAMIC_FTRACE_WITH_ARGS 339 def_bool y 340 depends on DYNAMIC_FTRACE 341 depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS 342 343config DYNAMIC_FTRACE_WITH_JMP 344 def_bool y 345 depends on DYNAMIC_FTRACE 346 depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS 347 depends on HAVE_DYNAMIC_FTRACE_WITH_JMP 348 349config FUNCTION_SELF_TRACING 350 bool "Function trace tracing code" 351 depends on FUNCTION_TRACER 352 help 353 Normally all the tracing code is set to notrace, where the function 354 tracer will ignore all the tracing functions. Sometimes it is useful 355 for debugging to trace some of the tracing infratructure itself. 356 Enable this to allow some of the tracing infrastructure to be traced 357 by the function tracer. Note, this will likely add noise to function 358 tracing if events and other tracing features are enabled along with 359 function tracing. 360 361 If unsure, say N. 362 363config FPROBE 364 bool "Kernel Function Probe (fprobe)" 365 depends on HAVE_FUNCTION_GRAPH_FREGS && HAVE_FTRACE_GRAPH_FUNC 366 depends on DYNAMIC_FTRACE_WITH_ARGS 367 select FUNCTION_GRAPH_TRACER 368 default n 369 help 370 This option enables kernel function probe (fprobe) based on ftrace. 371 The fprobe is similar to kprobes, but probes only for kernel function 372 entries and exits. This also can probe multiple functions by one 373 fprobe. 374 375 If unsure, say N. 376 377config FUNCTION_PROFILER 378 bool "Kernel function profiler" 379 depends on FUNCTION_TRACER 380 default n 381 help 382 This option enables the kernel function profiler. A file is created 383 in debugfs called function_profile_enabled which defaults to zero. 384 When a 1 is echoed into this file profiling begins, and when a 385 zero is entered, profiling stops. A "functions" file is created in 386 the trace_stat directory; this file shows the list of functions that 387 have been hit and their counters. 388 389 If in doubt, say N. 390 391config STACK_TRACER 392 bool "Trace max stack" 393 depends on HAVE_FUNCTION_TRACER 394 select FUNCTION_TRACER 395 select STACKTRACE 396 select KALLSYMS 397 help 398 This special tracer records the maximum stack footprint of the 399 kernel and displays it in /sys/kernel/tracing/stack_trace. 400 401 This tracer works by hooking into every function call that the 402 kernel executes, and keeping a maximum stack depth value and 403 stack-trace saved. If this is configured with DYNAMIC_FTRACE 404 then it will not have any overhead while the stack tracer 405 is disabled. 406 407 To enable the stack tracer on bootup, pass in 'stacktrace' 408 on the kernel command line. 409 410 The stack tracer can also be enabled or disabled via the 411 sysctl kernel.stack_tracer_enabled 412 413 Say N if unsure. 414 415config TRACE_PREEMPT_TOGGLE 416 bool 417 help 418 Enables hooks which will be called when preemption is first disabled, 419 and last enabled. 420 421config IRQSOFF_TRACER 422 bool "Interrupts-off Latency Tracer" 423 default n 424 depends on TRACE_IRQFLAGS_SUPPORT 425 select TRACE_IRQFLAGS 426 select GENERIC_TRACER 427 select TRACER_MAX_TRACE 428 select RING_BUFFER_ALLOW_SWAP 429 select TRACER_SNAPSHOT_PER_CPU_SWAP 430 help 431 This option measures the time spent in irqs-off critical 432 sections, with microsecond accuracy. 433 434 The default measurement method is a maximum search, which is 435 disabled by default and can be runtime (re-)started 436 via: 437 438 echo 0 > /sys/kernel/tracing/tracing_max_latency 439 440 (Note that kernel size and overhead increase with this option 441 enabled. This option and the preempt-off timing option can be 442 used together or separately.) 443 444config PREEMPT_TRACER 445 bool "Preemption-off Latency Tracer" 446 default n 447 depends on PREEMPTION 448 select GENERIC_TRACER 449 select TRACER_MAX_TRACE 450 select RING_BUFFER_ALLOW_SWAP 451 select TRACER_SNAPSHOT_PER_CPU_SWAP 452 select TRACE_PREEMPT_TOGGLE 453 help 454 This option measures the time spent in preemption-off critical 455 sections, with microsecond accuracy. 456 457 The default measurement method is a maximum search, which is 458 disabled by default and can be runtime (re-)started 459 via: 460 461 echo 0 > /sys/kernel/tracing/tracing_max_latency 462 463 (Note that kernel size and overhead increase with this option 464 enabled. This option and the irqs-off timing option can be 465 used together or separately.) 466 467config SCHED_TRACER 468 bool "Scheduling Latency Tracer" 469 select GENERIC_TRACER 470 select CONTEXT_SWITCH_TRACER 471 select TRACER_MAX_TRACE 472 help 473 This tracer tracks the latency of the highest priority task 474 to be scheduled in, starting from the point it has woken up. 475 476config HWLAT_TRACER 477 bool "Tracer to detect hardware latencies (like SMIs)" 478 select GENERIC_TRACER 479 select TRACER_MAX_TRACE 480 help 481 This tracer, when enabled will create one or more kernel threads, 482 depending on what the cpumask file is set to, which each thread 483 spinning in a loop looking for interruptions caused by 484 something other than the kernel. For example, if a 485 System Management Interrupt (SMI) takes a noticeable amount of 486 time, this tracer will detect it. This is useful for testing 487 if a system is reliable for Real Time tasks. 488 489 Some files are created in the tracing directory when this 490 is enabled: 491 492 hwlat_detector/width - time in usecs for how long to spin for 493 hwlat_detector/window - time in usecs between the start of each 494 iteration 495 496 A kernel thread is created that will spin with interrupts disabled 497 for "width" microseconds in every "window" cycle. It will not spin 498 for "window - width" microseconds, where the system can 499 continue to operate. 500 501 The output will appear in the trace and trace_pipe files. 502 503 When the tracer is not running, it has no affect on the system, 504 but when it is running, it can cause the system to be 505 periodically non responsive. Do not run this tracer on a 506 production system. 507 508 To enable this tracer, echo in "hwlat" into the current_tracer 509 file. Every time a latency is greater than tracing_thresh, it will 510 be recorded into the ring buffer. 511 512config OSNOISE_TRACER 513 bool "OS Noise tracer" 514 select GENERIC_TRACER 515 select TRACER_MAX_TRACE 516 help 517 In the context of high-performance computing (HPC), the Operating 518 System Noise (osnoise) refers to the interference experienced by an 519 application due to activities inside the operating system. In the 520 context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread 521 can cause noise to the system. Moreover, hardware-related jobs can 522 also cause noise, for example, via SMIs. 523 524 The osnoise tracer leverages the hwlat_detector by running a similar 525 loop with preemption, SoftIRQs and IRQs enabled, thus allowing all 526 the sources of osnoise during its execution. The osnoise tracer takes 527 note of the entry and exit point of any source of interferences, 528 increasing a per-cpu interference counter. It saves an interference 529 counter for each source of interference. The interference counter for 530 NMI, IRQs, SoftIRQs, and threads is increased anytime the tool 531 observes these interferences' entry events. When a noise happens 532 without any interference from the operating system level, the 533 hardware noise counter increases, pointing to a hardware-related 534 noise. In this way, osnoise can account for any source of 535 interference. At the end of the period, the osnoise tracer prints 536 the sum of all noise, the max single noise, the percentage of CPU 537 available for the thread, and the counters for the noise sources. 538 539 In addition to the tracer, a set of tracepoints were added to 540 facilitate the identification of the osnoise source. 541 542 The output will appear in the trace and trace_pipe files. 543 544 To enable this tracer, echo in "osnoise" into the current_tracer 545 file. 546 547config TIMERLAT_TRACER 548 bool "Timerlat tracer" 549 select OSNOISE_TRACER 550 select GENERIC_TRACER 551 help 552 The timerlat tracer aims to help the preemptive kernel developers 553 to find sources of wakeup latencies of real-time threads. 554 555 The tracer creates a per-cpu kernel thread with real-time priority. 556 The tracer thread sets a periodic timer to wakeup itself, and goes 557 to sleep waiting for the timer to fire. At the wakeup, the thread 558 then computes a wakeup latency value as the difference between 559 the current time and the absolute time that the timer was set 560 to expire. 561 562 The tracer prints two lines at every activation. The first is the 563 timer latency observed at the hardirq context before the 564 activation of the thread. The second is the timer latency observed 565 by the thread, which is the same level that cyclictest reports. The 566 ACTIVATION ID field serves to relate the irq execution to its 567 respective thread execution. 568 569 The tracer is build on top of osnoise tracer, and the osnoise: 570 events can be used to trace the source of interference from NMI, 571 IRQs and other threads. It also enables the capture of the 572 stacktrace at the IRQ context, which helps to identify the code 573 path that can cause thread delay. 574 575config MMIOTRACE 576 bool "Memory mapped IO tracing" 577 depends on HAVE_MMIOTRACE_SUPPORT && PCI 578 select GENERIC_TRACER 579 help 580 Mmiotrace traces Memory Mapped I/O access and is meant for 581 debugging and reverse engineering. It is called from the ioremap 582 implementation and works via page faults. Tracing is disabled by 583 default and can be enabled at run-time. 584 585 See Documentation/trace/mmiotrace.rst. 586 If you are not helping to develop drivers, say N. 587 588config ENABLE_DEFAULT_TRACERS 589 bool "Trace process context switches and events" 590 depends on !GENERIC_TRACER 591 select TRACING 592 help 593 This tracer hooks to various trace points in the kernel, 594 allowing the user to pick and choose which trace point they 595 want to trace. It also includes the sched_switch tracer plugin. 596 597config FTRACE_SYSCALLS 598 bool "Trace syscalls" 599 depends on HAVE_SYSCALL_TRACEPOINTS 600 select GENERIC_TRACER 601 select KALLSYMS 602 help 603 Basic tracer to catch the syscall entry and exit events. 604 605config TRACE_SYSCALL_BUF_SIZE_DEFAULT 606 int "System call user read max size" 607 range 0 165 608 default 63 609 depends on FTRACE_SYSCALLS 610 help 611 Some system call trace events will record the data from a user 612 space address that one of the parameters point to. The amount of 613 data per event is limited. That limit is set by this config and 614 this config also affects how much user space data perf can read. 615 616 For a tracing instance, this size may be changed by writing into 617 its syscall_user_buf_size file. 618 619config TRACER_SNAPSHOT 620 bool "Create a snapshot trace buffer" 621 help 622 Allow tracing users to take snapshot of the current buffer using the 623 ftrace interface, e.g.: 624 625 echo 1 > /sys/kernel/tracing/snapshot 626 cat snapshot 627 628 Note, the latency tracers select this option. To disable it, 629 all the latency tracers need to be disabled. 630 631config TRACER_SNAPSHOT_PER_CPU_SWAP 632 bool "Allow snapshot to swap per CPU" 633 depends on TRACER_SNAPSHOT 634 select RING_BUFFER_ALLOW_SWAP 635 help 636 Allow doing a snapshot of a single CPU buffer instead of a 637 full swap (all buffers). If this is set, then the following is 638 allowed: 639 640 echo 1 > /sys/kernel/tracing/per_cpu/cpu2/snapshot 641 642 After which, only the tracing buffer for CPU 2 was swapped with 643 the main tracing buffer, and the other CPU buffers remain the same. 644 645 When this is enabled, this adds a little more overhead to the 646 trace recording, as it needs to add some checks to synchronize 647 recording with swaps. But this does not affect the performance 648 of the overall system. This is enabled by default when the preempt 649 or irq latency tracers are enabled, as those need to swap as well 650 and already adds the overhead (plus a lot more). 651 652config TRACE_BRANCH_PROFILING 653 bool 654 select GENERIC_TRACER 655 656choice 657 prompt "Branch Profiling" 658 default BRANCH_PROFILE_NONE 659 help 660 The branch profiling is a software profiler. It will add hooks 661 into the C conditionals to test which path a branch takes. 662 663 The likely/unlikely profiler only looks at the conditions that 664 are annotated with a likely or unlikely macro. 665 666 The "all branch" profiler will profile every if-statement in the 667 kernel. This profiler will also enable the likely/unlikely 668 profiler. 669 670 Either of the above profilers adds a bit of overhead to the system. 671 If unsure, choose "No branch profiling". 672 673config BRANCH_PROFILE_NONE 674 bool "No branch profiling" 675 help 676 No branch profiling. Branch profiling adds a bit of overhead. 677 Only enable it if you want to analyse the branching behavior. 678 Otherwise keep it disabled. 679 680config PROFILE_ANNOTATED_BRANCHES 681 bool "Trace likely/unlikely profiler" 682 select TRACE_BRANCH_PROFILING 683 help 684 This tracer profiles all likely and unlikely macros 685 in the kernel. It will display the results in: 686 687 /sys/kernel/tracing/trace_stat/branch_annotated 688 689 Note: this will add a significant overhead; only turn this 690 on if you need to profile the system's use of these macros. 691 692config PROFILE_ALL_BRANCHES 693 bool "Profile all if conditionals" if !FORTIFY_SOURCE 694 select TRACE_BRANCH_PROFILING 695 help 696 This tracer profiles all branch conditions. Every if () 697 taken in the kernel is recorded whether it hit or miss. 698 The results will be displayed in: 699 700 /sys/kernel/tracing/trace_stat/branch_all 701 702 This option also enables the likely/unlikely profiler. 703 704 This configuration, when enabled, will impose a great overhead 705 on the system. This should only be enabled when the system 706 is to be analyzed in much detail. 707endchoice 708 709config TRACING_BRANCHES 710 bool 711 help 712 Selected by tracers that will trace the likely and unlikely 713 conditions. This prevents the tracers themselves from being 714 profiled. Profiling the tracing infrastructure can only happen 715 when the likelys and unlikelys are not being traced. 716 717config BRANCH_TRACER 718 bool "Trace likely/unlikely instances" 719 depends on TRACE_BRANCH_PROFILING 720 select TRACING_BRANCHES 721 help 722 This traces the events of likely and unlikely condition 723 calls in the kernel. The difference between this and the 724 "Trace likely/unlikely profiler" is that this is not a 725 histogram of the callers, but actually places the calling 726 events into a running trace buffer to see when and where the 727 events happened, as well as their results. 728 729 Say N if unsure. 730 731config BLK_DEV_IO_TRACE 732 bool "Support for tracing block IO actions" 733 depends on SYSFS 734 depends on BLOCK 735 select RELAY 736 select DEBUG_FS 737 select TRACEPOINTS 738 select GENERIC_TRACER 739 select STACKTRACE 740 help 741 Say Y here if you want to be able to trace the block layer actions 742 on a given queue. Tracing allows you to see any traffic happening 743 on a block device queue. For more information (and the userspace 744 support tools needed), fetch the blktrace tools from: 745 746 git://git.kernel.dk/blktrace.git 747 748 Tracing also is possible using the ftrace interface, e.g.: 749 750 echo 1 > /sys/block/sda/sda1/trace/enable 751 echo blk > /sys/kernel/tracing/current_tracer 752 cat /sys/kernel/tracing/trace_pipe 753 754 If unsure, say N. 755 756config FPROBE_EVENTS 757 depends on FPROBE 758 depends on HAVE_REGS_AND_STACK_ACCESS_API 759 bool "Enable fprobe-based dynamic events" 760 select TRACING 761 select PROBE_EVENTS 762 select DYNAMIC_EVENTS 763 default y 764 help 765 This allows user to add tracing events on the function entry and 766 exit via ftrace interface. The syntax is same as the kprobe events 767 and the kprobe events on function entry and exit will be 768 transparently converted to this fprobe events. 769 770config PROBE_EVENTS_BTF_ARGS 771 depends on HAVE_FUNCTION_ARG_ACCESS_API 772 depends on FPROBE_EVENTS || KPROBE_EVENTS 773 depends on DEBUG_INFO_BTF && BPF_SYSCALL 774 bool "Support BTF function arguments for probe events" 775 default y 776 help 777 The user can specify the arguments of the probe event using the names 778 of the arguments of the probed function, when the probe location is a 779 kernel function entry or a tracepoint. 780 This is available only if BTF (BPF Type Format) support is enabled. 781 782config KPROBE_EVENTS 783 depends on KPROBES 784 depends on HAVE_REGS_AND_STACK_ACCESS_API 785 bool "Enable kprobes-based dynamic events" 786 select TRACING 787 select PROBE_EVENTS 788 select DYNAMIC_EVENTS 789 default y 790 help 791 This allows the user to add tracing events (similar to tracepoints) 792 on the fly via the ftrace interface. See 793 Documentation/trace/kprobetrace.rst for more details. 794 795 Those events can be inserted wherever kprobes can probe, and record 796 various register and memory values. 797 798 This option is also required by perf-probe subcommand of perf tools. 799 If you want to use perf tools, this option is strongly recommended. 800 801config KPROBE_EVENTS_ON_NOTRACE 802 bool "Do NOT protect notrace function from kprobe events" 803 depends on KPROBE_EVENTS 804 depends on DYNAMIC_FTRACE 805 default n 806 help 807 This is only for the developers who want to debug ftrace itself 808 using kprobe events. 809 810 If kprobes can use ftrace instead of breakpoint, ftrace related 811 functions are protected from kprobe-events to prevent an infinite 812 recursion or any unexpected execution path which leads to a kernel 813 crash. 814 815 This option disables such protection and allows you to put kprobe 816 events on ftrace functions for debugging ftrace by itself. 817 Note that this might let you shoot yourself in the foot. 818 819 If unsure, say N. 820 821config UPROBE_EVENTS 822 bool "Enable uprobes-based dynamic events" 823 depends on ARCH_SUPPORTS_UPROBES 824 depends on MMU 825 depends on PERF_EVENTS 826 select UPROBES 827 select PROBE_EVENTS 828 select DYNAMIC_EVENTS 829 select TRACING 830 default y 831 help 832 This allows the user to add tracing events on top of userspace 833 dynamic events (similar to tracepoints) on the fly via the trace 834 events interface. Those events can be inserted wherever uprobes 835 can probe, and record various registers. 836 This option is required if you plan to use perf-probe subcommand 837 of perf tools on user space applications. 838 839config EPROBE_EVENTS 840 bool "Enable event-based dynamic events" 841 depends on TRACING 842 depends on HAVE_REGS_AND_STACK_ACCESS_API 843 select PROBE_EVENTS 844 select DYNAMIC_EVENTS 845 default y 846 help 847 Eprobes are dynamic events that can be placed on other existing 848 events. It can be used to limit what fields are recorded in 849 an event or even dereference a field of an event. It can 850 convert the type of an event field. For example, turn an 851 address into a string. 852 853config BPF_EVENTS 854 depends on BPF_SYSCALL 855 depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS 856 bool 857 default y 858 help 859 This allows the user to attach BPF programs to kprobe, uprobe, and 860 tracepoint events. 861 862config DYNAMIC_EVENTS 863 def_bool n 864 865config PROBE_EVENTS 866 def_bool n 867 868config BPF_KPROBE_OVERRIDE 869 bool "Enable BPF programs to override a kprobed function" 870 depends on BPF_EVENTS 871 depends on FUNCTION_ERROR_INJECTION 872 default n 873 help 874 Allows BPF to override the execution of a probed function and 875 set a different return value. This is used for error injection. 876 877config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY 878 bool 879 depends on DYNAMIC_FTRACE 880 881config FTRACE_MCOUNT_USE_CC 882 def_bool y 883 depends on $(cc-option,-mrecord-mcount) 884 depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY 885 depends on DYNAMIC_FTRACE 886 887config FTRACE_MCOUNT_USE_OBJTOOL 888 def_bool y 889 depends on HAVE_OBJTOOL_MCOUNT 890 depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY 891 depends on !FTRACE_MCOUNT_USE_CC 892 depends on DYNAMIC_FTRACE 893 select OBJTOOL 894 895config FTRACE_MCOUNT_USE_RECORDMCOUNT 896 def_bool y 897 depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY 898 depends on !FTRACE_MCOUNT_USE_CC 899 depends on !FTRACE_MCOUNT_USE_OBJTOOL 900 depends on DYNAMIC_FTRACE 901 902config TRACING_MAP 903 bool 904 depends on ARCH_HAVE_NMI_SAFE_CMPXCHG 905 help 906 tracing_map is a special-purpose lock-free map for tracing, 907 separated out as a stand-alone facility in order to allow it 908 to be shared between multiple tracers. It isn't meant to be 909 generally used outside of that context, and is normally 910 selected by tracers that use it. 911 912config SYNTH_EVENTS 913 bool "Synthetic trace events" 914 select TRACING 915 select DYNAMIC_EVENTS 916 default n 917 help 918 Synthetic events are user-defined trace events that can be 919 used to combine data from other trace events or in fact any 920 data source. Synthetic events can be generated indirectly 921 via the trace() action of histogram triggers or directly 922 by way of an in-kernel API. 923 924 See Documentation/trace/events.rst or 925 Documentation/trace/histogram.rst for details and examples. 926 927 If in doubt, say N. 928 929config USER_EVENTS 930 bool "User trace events" 931 select TRACING 932 select DYNAMIC_EVENTS 933 help 934 User trace events are user-defined trace events that 935 can be used like an existing kernel trace event. User trace 936 events are generated by writing to a tracefs file. User 937 processes can determine if their tracing events should be 938 generated by registering a value and bit with the kernel 939 that reflects when it is enabled or not. 940 941 See Documentation/trace/user_events.rst. 942 If in doubt, say N. 943 944config HIST_TRIGGERS 945 bool "Histogram triggers" 946 depends on ARCH_HAVE_NMI_SAFE_CMPXCHG 947 select TRACING_MAP 948 select TRACING 949 select DYNAMIC_EVENTS 950 select SYNTH_EVENTS 951 default n 952 help 953 Hist triggers allow one or more arbitrary trace event fields 954 to be aggregated into hash tables and dumped to stdout by 955 reading a debugfs/tracefs file. They're useful for 956 gathering quick and dirty (though precise) summaries of 957 event activity as an initial guide for further investigation 958 using more advanced tools. 959 960 Inter-event tracing of quantities such as latencies is also 961 supported using hist triggers under this option. 962 963 See Documentation/trace/histogram.rst. 964 If in doubt, say N. 965 966config TRACE_EVENT_INJECT 967 bool "Trace event injection" 968 depends on TRACING 969 help 970 Allow user-space to inject a specific trace event into the ring 971 buffer. This is mainly used for testing purpose. 972 973 If unsure, say N. 974 975config TRACEPOINT_BENCHMARK 976 bool "Add tracepoint that benchmarks tracepoints" 977 help 978 This option creates the tracepoint "benchmark:benchmark_event". 979 When the tracepoint is enabled, it kicks off a kernel thread that 980 goes into an infinite loop (calling cond_resched() to let other tasks 981 run), and calls the tracepoint. Each iteration will record the time 982 it took to write to the tracepoint and the next iteration that 983 data will be passed to the tracepoint itself. That is, the tracepoint 984 will report the time it took to do the previous tracepoint. 985 The string written to the tracepoint is a static string of 128 bytes 986 to keep the time the same. The initial string is simply a write of 987 "START". The second string records the cold cache time of the first 988 write which is not added to the rest of the calculations. 989 990 As it is a tight loop, it benchmarks as hot cache. That's fine because 991 we care most about hot paths that are probably in cache already. 992 993 An example of the output: 994 995 START 996 first=3672 [COLD CACHED] 997 last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712 998 last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337 999 last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064 1000 last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411 1001 last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389 1002 last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666 1003 1004 1005config RING_BUFFER_BENCHMARK 1006 tristate "Ring buffer benchmark stress tester" 1007 depends on RING_BUFFER 1008 help 1009 This option creates a test to stress the ring buffer and benchmark it. 1010 It creates its own ring buffer such that it will not interfere with 1011 any other users of the ring buffer (such as ftrace). It then creates 1012 a producer and consumer that will run for 10 seconds and sleep for 1013 10 seconds. Each interval it will print out the number of events 1014 it recorded and give a rough estimate of how long each iteration took. 1015 1016 It does not disable interrupts or raise its priority, so it may be 1017 affected by processes that are running. 1018 1019 If unsure, say N. 1020 1021config TRACE_EVAL_MAP_FILE 1022 bool "Show eval mappings for trace events" 1023 depends on TRACING 1024 help 1025 The "print fmt" of the trace events will show the enum/sizeof names 1026 instead of their values. This can cause problems for user space tools 1027 that use this string to parse the raw data as user space does not know 1028 how to convert the string to its value. 1029 1030 To fix this, there's a special macro in the kernel that can be used 1031 to convert an enum/sizeof into its value. If this macro is used, then 1032 the print fmt strings will be converted to their values. 1033 1034 If something does not get converted properly, this option can be 1035 used to show what enums/sizeof the kernel tried to convert. 1036 1037 This option is for debugging the conversions. A file is created 1038 in the tracing directory called "eval_map" that will show the 1039 names matched with their values and what trace event system they 1040 belong too. 1041 1042 Normally, the mapping of the strings to values will be freed after 1043 boot up or module load. With this option, they will not be freed, as 1044 they are needed for the "eval_map" file. Enabling this option will 1045 increase the memory footprint of the running kernel. 1046 1047 If unsure, say N. 1048 1049config FTRACE_RECORD_RECURSION 1050 bool "Record functions that recurse in function tracing" 1051 depends on FUNCTION_TRACER 1052 help 1053 All callbacks that attach to the function tracing have some sort 1054 of protection against recursion. Even though the protection exists, 1055 it adds overhead. This option will create a file in the tracefs 1056 file system called "recursed_functions" that will list the functions 1057 that triggered a recursion. 1058 1059 This will add more overhead to cases that have recursion. 1060 1061 If unsure, say N 1062 1063config FTRACE_RECORD_RECURSION_SIZE 1064 int "Max number of recursed functions to record" 1065 default 128 1066 depends on FTRACE_RECORD_RECURSION 1067 help 1068 This defines the limit of number of functions that can be 1069 listed in the "recursed_functions" file, that lists all 1070 the functions that caused a recursion to happen. 1071 This file can be reset, but the limit can not change in 1072 size at runtime. 1073 1074config FTRACE_VALIDATE_RCU_IS_WATCHING 1075 bool "Validate RCU is on during ftrace execution" 1076 depends on FUNCTION_TRACER 1077 depends on ARCH_WANTS_NO_INSTR 1078 help 1079 All callbacks that attach to the function tracing have some sort of 1080 protection against recursion. This option is only to verify that 1081 ftrace (and other users of ftrace_test_recursion_trylock()) are not 1082 called outside of RCU, as if they are, it can cause a race. But it 1083 also has a noticeable overhead when enabled. 1084 1085 If unsure, say N 1086 1087config RING_BUFFER_RECORD_RECURSION 1088 bool "Record functions that recurse in the ring buffer" 1089 depends on FTRACE_RECORD_RECURSION 1090 # default y, because it is coupled with FTRACE_RECORD_RECURSION 1091 default y 1092 help 1093 The ring buffer has its own internal recursion. Although when 1094 recursion happens it won't cause harm because of the protection, 1095 but it does cause unwanted overhead. Enabling this option will 1096 place where recursion was detected into the ftrace "recursed_functions" 1097 file. 1098 1099 This will add more overhead to cases that have recursion. 1100 1101config GCOV_PROFILE_FTRACE 1102 bool "Enable GCOV profiling on ftrace subsystem" 1103 depends on GCOV_KERNEL 1104 help 1105 Enable GCOV profiling on ftrace subsystem for checking 1106 which functions/lines are tested. 1107 1108 If unsure, say N. 1109 1110 Note that on a kernel compiled with this config, ftrace will 1111 run significantly slower. 1112 1113config FTRACE_SELFTEST 1114 bool 1115 1116config FTRACE_STARTUP_TEST 1117 bool "Perform a startup test on ftrace" 1118 depends on GENERIC_TRACER 1119 select FTRACE_SELFTEST 1120 help 1121 This option performs a series of startup tests on ftrace. On bootup 1122 a series of tests are made to verify that the tracer is 1123 functioning properly. It will do tests on all the configured 1124 tracers of ftrace. 1125 1126config EVENT_TRACE_STARTUP_TEST 1127 bool "Run selftest on trace events" 1128 depends on FTRACE_STARTUP_TEST 1129 default y 1130 help 1131 This option performs a test on all trace events in the system. 1132 It basically just enables each event and runs some code that 1133 will trigger events (not necessarily the event it enables) 1134 This may take some time run as there are a lot of events. 1135 1136config EVENT_TRACE_TEST_SYSCALLS 1137 bool "Run selftest on syscall events" 1138 depends on EVENT_TRACE_STARTUP_TEST 1139 help 1140 This option will also enable testing every syscall event. 1141 It only enables the event and disables it and runs various loads 1142 with the event enabled. This adds a bit more time for kernel boot 1143 up since it runs this on every system call defined. 1144 1145 TBD - enable a way to actually call the syscalls as we test their 1146 events 1147 1148config FTRACE_SORT_STARTUP_TEST 1149 bool "Verify compile time sorting of ftrace functions" 1150 depends on DYNAMIC_FTRACE 1151 depends on BUILDTIME_MCOUNT_SORT 1152 help 1153 Sorting of the mcount_loc sections that is used to find the 1154 where the ftrace knows where to patch functions for tracing 1155 and other callbacks is done at compile time. But if the sort 1156 is not done correctly, it will cause non-deterministic failures. 1157 When this is set, the sorted sections will be verified that they 1158 are in deed sorted and will warn if they are not. 1159 1160 If unsure, say N 1161 1162config RING_BUFFER_STARTUP_TEST 1163 bool "Ring buffer startup self test" 1164 depends on RING_BUFFER 1165 help 1166 Run a simple self test on the ring buffer on boot up. Late in the 1167 kernel boot sequence, the test will start that kicks off 1168 a thread per cpu. Each thread will write various size events 1169 into the ring buffer. Another thread is created to send IPIs 1170 to each of the threads, where the IPI handler will also write 1171 to the ring buffer, to test/stress the nesting ability. 1172 If any anomalies are discovered, a warning will be displayed 1173 and all ring buffers will be disabled. 1174 1175 The test runs for 10 seconds. This will slow your boot time 1176 by at least 10 more seconds. 1177 1178 At the end of the test, statistics and more checks are done. 1179 It will output the stats of each per cpu buffer: What 1180 was written, the sizes, what was read, what was lost, and 1181 other similar details. 1182 1183 If unsure, say N 1184 1185config RING_BUFFER_VALIDATE_TIME_DELTAS 1186 bool "Verify ring buffer time stamp deltas" 1187 depends on RING_BUFFER 1188 help 1189 This will audit the time stamps on the ring buffer sub 1190 buffer to make sure that all the time deltas for the 1191 events on a sub buffer matches the current time stamp. 1192 This audit is performed for every event that is not 1193 interrupted, or interrupting another event. A check 1194 is also made when traversing sub buffers to make sure 1195 that all the deltas on the previous sub buffer do not 1196 add up to be greater than the current time stamp. 1197 1198 NOTE: This adds significant overhead to recording of events, 1199 and should only be used to test the logic of the ring buffer. 1200 Do not use it on production systems. 1201 1202 Only say Y if you understand what this does, and you 1203 still want it enabled. Otherwise say N 1204 1205config MMIOTRACE_TEST 1206 tristate "Test module for mmiotrace" 1207 depends on MMIOTRACE && m 1208 help 1209 This is a dumb module for testing mmiotrace. It is very dangerous 1210 as it will write garbage to IO memory starting at a given address. 1211 However, it should be safe to use on e.g. unused portion of VRAM. 1212 1213 Say N, unless you absolutely know what you are doing. 1214 1215config PREEMPTIRQ_DELAY_TEST 1216 tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers" 1217 depends on m 1218 help 1219 Select this option to build a test module that can help test latency 1220 tracers by executing a preempt or irq disable section with a user 1221 configurable delay. The module busy waits for the duration of the 1222 critical section. 1223 1224 For example, the following invocation generates a burst of three 1225 irq-disabled critical sections for 500us: 1226 modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3 1227 1228 What's more, if you want to attach the test on the cpu which the latency 1229 tracer is running on, specify cpu_affinity=cpu_num at the end of the 1230 command. 1231 1232 If unsure, say N 1233 1234config SYNTH_EVENT_GEN_TEST 1235 tristate "Test module for in-kernel synthetic event generation" 1236 depends on SYNTH_EVENTS && m 1237 help 1238 This option creates a test module to check the base 1239 functionality of in-kernel synthetic event definition and 1240 generation. 1241 1242 To test, insert the module, and then check the trace buffer 1243 for the generated sample events. 1244 1245 If unsure, say N. 1246 1247config KPROBE_EVENT_GEN_TEST 1248 tristate "Test module for in-kernel kprobe event generation" 1249 depends on KPROBE_EVENTS && m 1250 help 1251 This option creates a test module to check the base 1252 functionality of in-kernel kprobe event definition. 1253 1254 To test, insert the module, and then check the trace buffer 1255 for the generated kprobe events. 1256 1257 If unsure, say N. 1258 1259config HIST_TRIGGERS_DEBUG 1260 bool "Hist trigger debug support" 1261 depends on HIST_TRIGGERS 1262 help 1263 Add "hist_debug" file for each event, which when read will 1264 dump out a bunch of internal details about the hist triggers 1265 defined on that event. 1266 1267 The hist_debug file serves a couple of purposes: 1268 1269 - Helps developers verify that nothing is broken. 1270 1271 - Provides educational information to support the details 1272 of the hist trigger internals as described by 1273 Documentation/trace/histogram-design.rst. 1274 1275 The hist_debug output only covers the data structures 1276 related to the histogram definitions themselves and doesn't 1277 display the internals of map buckets or variable values of 1278 running histograms. 1279 1280 If unsure, say N. 1281 1282source "kernel/trace/rv/Kconfig" 1283 1284endif # FTRACE 1285