# SPDX-License-Identifier: GPL-2.0-only
#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_RETHOOK
	bool

config RETHOOK
	bool
	depends on HAVE_RETHOOK
	help
	  Enable the generic return hooking feature. This is an internal
	  API, which will be used by other function-entry hooking
	  features like fprobe and kprobes.

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_FREGS
	bool

config HAVE_FTRACE_GRAPH_FUNC
	bool
	help
	  True if ftrace_graph_func() is defined.

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_ARGS
	bool
	help
	  If this is set, then the arguments and stack can be found from
	  the ftrace_regs passed into the function callback regs parameter
	  by default, even without setting the REGS flag in the ftrace_ops.
	  This allows for use of ftrace_regs_get_argument() and
	  ftrace_regs_get_stack_pointer().

config HAVE_FTRACE_REGS_HAVING_PT_REGS
	bool
	help
	  If this is set, ftrace_regs contains pt_regs, and thus can be
	  converted to pt_regs without allocating memory.

config HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
	bool
	help
	  If the architecture generates __patchable_function_entries sections
	  but does not want them included in the ftrace locations.

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_NOP_MCOUNT
	bool
	help
	  Arch supports the gcc options -pg with -mrecord-mcount and -mnop-mcount

config HAVE_OBJTOOL_MCOUNT
	bool
	help
	  Arch supports objtool --mcount

config HAVE_OBJTOOL_NOP_MCOUNT
	bool
	help
	  Arch supports the objtool options --mcount with --mnop.
	  An architecture can select this if it wants to enable nop'ing
	  of ftrace locations.

config HAVE_C_RECORDMCOUNT
	bool
	help
	  The C version of recordmcount is available for this architecture.

config HAVE_BUILDTIME_MCOUNT_SORT
	bool
	help
	  An architecture selects this if it sorts the mcount_loc section
	  at build time.

config BUILDTIME_MCOUNT_SORT
	bool
	default y
	depends on HAVE_BUILDTIME_MCOUNT_SORT && DYNAMIC_FTRACE
	help
	  Sort the mcount_loc section at build time.

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

config PREEMPTIRQ_TRACEPOINTS
	bool
	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
	select TRACING
	default y
	help
	  Create preempt/irq toggle tracepoints if needed, so that other parts
	  of the kernel can use them to generate or add hooks to them.

# All tracer options should select GENERIC_TRACER. Options that are enabled
# by all tracers (the context switch and event tracers) select TRACING instead.
# This allows those options to appear when no other tracer is selected, but to
# stay hidden when something else selects them. The two symbols GENERIC_TRACER
# and TRACING are needed to avoid circular dependencies while accomplishing
# this hiding of the automatic options.

config TRACING
	bool
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK
	select NEED_TASKS_RCU

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

menuconfig FTRACE
	bool "Tracers"
	depends on TRACING_SUPPORT
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config TRACEFS_AUTOMOUNT_DEPRECATED
	bool "Automount tracefs on debugfs [DEPRECATED]"
	depends on TRACING
	default y
	help
	  The tracing interface was moved from /sys/kernel/debug/tracing
	  to /sys/kernel/tracing in 2015, but the tracing file system
	  was still automounted in /sys/kernel/debug for backward
	  compatibility with tooling.

	  The new interface has been around for more than 10 years and
	  the old debug mount will soon be removed.

config BOOTTIME_TRACING
	bool "Boot-time Tracing support"
	depends on TRACING
	select BOOT_CONFIG
	help
	  Allow developers to set up the ftrace subsystem via a supplemental
	  kernel cmdline at boot time, for debugging (tracing) driver
	  initialization and the boot process.

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select NEED_TASKS_RCU
	select TASKS_RUDE_RCU
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function. That NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If tracing is disabled at
	  runtime (the bootup default), the overhead of the instructions is
	  very small and not measurable even in micro-benchmarks (at least on
	  x86, but it may have an impact on other architectures).

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry
	  and its return.
	  Its main purpose is to measure the duration of functions and
	  draw a call graph for each thread, along with information such as
	  the return value. This is done by saving the current return
	  address into a stack of calls on the current task structure.
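
# A minimal usage sketch for the function and function graph tracers above,
# assuming tracefs is mounted at /sys/kernel/tracing and that vfs_read is a
# traceable function on the running kernel (the function name is illustrative):
#
#   # trace only vfs_read and its callees with the function graph tracer
#   echo vfs_read > /sys/kernel/tracing/set_graph_function
#   echo function_graph > /sys/kernel/tracing/current_tracer
#   cat /sys/kernel/tracing/trace
#
#   # switch to plain function tracing, filtered by set_ftrace_filter
#   echo vfs_read > /sys/kernel/tracing/set_ftrace_filter
#   echo function > /sys/kernel/tracing/current_tracer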

config FUNCTION_GRAPH_RETVAL
	bool "Kernel Function Graph Return Value"
	depends on HAVE_FUNCTION_GRAPH_FREGS
	depends on FUNCTION_GRAPH_TRACER
	default n
	help
	  Support recording and printing the function return value when
	  using the function graph tracer. It can be helpful to locate
	  functions that return errors. This feature is off by default, and
	  you can enable it via the trace option funcgraph-retval.
	  See Documentation/trace/ftrace.rst

config FUNCTION_GRAPH_RETADDR
	bool "Kernel Function Graph Return Address"
	depends on FUNCTION_GRAPH_TRACER
	default n
	help
	  Support recording and printing the function return address when
	  using the function graph tracer. It can be helpful to locate the
	  code line from which the function is called. This feature is off by
	  default, and you can enable it via the trace option funcgraph-retaddr.

config FUNCTION_TRACE_ARGS
	bool
	depends on PROBE_EVENTS_BTF_ARGS
	default y
	help
	  If the function argument access API and BTF are supported, then
	  the function tracer and function graph tracer can print function
	  arguments. This feature is off by default, and can be enabled via
	  the trace option func-args (for the function tracer) and
	  funcgraph-args (for the function graph tracer).

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (patch them out of the binary image and replace
	  them with a No-Op instruction) on boot up. At compile time,
	  a table is made of all the locations that ftrace can function
	  trace, and this table is linked into the kernel image. When this
	  is enabled, functions can be enabled individually, and the
	  functions that are not enabled will not affect the performance
	  of the system.

	  See the files in /sys/kernel/tracing:

	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	def_bool y
	depends on DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS
	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config DYNAMIC_FTRACE_WITH_CALL_OPS
	def_bool y
	depends on HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS

config DYNAMIC_FTRACE_WITH_ARGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS

config FPROBE
	bool "Kernel Function Probe (fprobe)"
	depends on HAVE_FUNCTION_GRAPH_FREGS && HAVE_FTRACE_GRAPH_FUNC
	depends on DYNAMIC_FTRACE_WITH_ARGS
	select FUNCTION_GRAPH_TRACER
	default n
	help
	  This option enables the kernel function probe (fprobe) based on
	  ftrace. An fprobe is similar to a kprobe, but probes only kernel
	  function entries and exits. A single fprobe can also probe
	  multiple functions.

	  If unsure, say N.

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in the tracing directory called function_profile_enabled, which
	  defaults to zero. When a 1 is echoed into this file, profiling
	  begins; when a zero is entered, profiling stops. A "functions" file
	  is created in the trace_stat directory; this file shows the list of
	  functions that have been hit and their counters.

	  If in doubt, say N.
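
# A short sketch of using the function profiler described above, assuming
# tracefs is mounted at /sys/kernel/tracing:
#
#   echo 1 > /sys/kernel/tracing/function_profile_enabled
#   # ... run the workload of interest ...
#   echo 0 > /sys/kernel/tracing/function_profile_enabled
#   cat /sys/kernel/tracing/trace_stat/function*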

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping track of the maximum stack depth
	  along with the stack trace that produced it. If this is configured
	  with DYNAMIC_FTRACE then it will not have any overhead while the
	  stack tracer is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled

	  Say N if unsure.

config TRACE_PREEMPT_TOGGLE
	bool
	help
	  Enables hooks which will be called when preemption is first disabled,
	  and last enabled.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on PREEMPTION
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	select TRACE_PREEMPT_TOGGLE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.
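
# A minimal sketch of running one of the latency tracers above, assuming
# tracefs is mounted at /sys/kernel/tracing (irqsoff is used as the example;
# the wakeup, wakeup_rt, and preemptoff tracers work the same way when their
# options are enabled):
#
#   echo 0 > /sys/kernel/tracing/tracing_max_latency
#   echo irqsoff > /sys/kernel/tracing/current_tracer
#   # ... let the system run for a while ...
#   cat /sys/kernel/tracing/tracing_max_latency
#   cat /sys/kernel/tracing/trace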

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  This tracer, when enabled, will create one or more kernel threads,
	  depending on what the cpumask file is set to, with each thread
	  spinning in a loop looking for interruptions caused by
	  something other than the kernel. For example, if a
	  System Management Interrupt (SMI) takes a noticeable amount of
	  time, this tracer will detect it. This is useful for testing
	  whether a system is reliable for Real Time tasks.

	  Some files are created in the tracing directory when this
	  is enabled:

	    hwlat_detector/width  - time in usecs for how long to spin
	    hwlat_detector/window - time in usecs between the start of each
	                            iteration

	  A kernel thread is created that will spin with interrupts disabled
	  for "width" microseconds in every "window" cycle. It will not spin
	  for "window - width" microseconds, during which the system can
	  continue to operate.

	  The output will appear in the trace and trace_pipe files.

	  When the tracer is not running, it has no effect on the system,
	  but when it is running, it can cause the system to be
	  periodically non-responsive. Do not run this tracer on a
	  production system.

	  To enable this tracer, echo "hwlat" into the current_tracer
	  file. Every time a latency is greater than tracing_thresh, it will
	  be recorded into the ring buffer.

config OSNOISE_TRACER
	bool "OS Noise tracer"
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  In the context of high-performance computing (HPC), Operating
	  System Noise (osnoise) refers to the interference experienced by an
	  application due to activities inside the operating system. In the
	  context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
	  can cause noise to the system. Moreover, hardware-related jobs can
	  also cause noise, for example, via SMIs.

	  The osnoise tracer leverages the hwlat_detector by running a similar
	  loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
	  the sources of osnoise during its execution. The osnoise tracer takes
	  note of the entry and exit point of any source of interference,
	  increasing a per-cpu interference counter. It saves an interference
	  counter for each source of interference. The interference counter for
	  NMI, IRQs, SoftIRQs, and threads is increased any time the tool
	  observes these interferences' entry events. When a noise happens
	  without any interference from the operating system level, the
	  hardware noise counter increases, pointing to a hardware-related
	  noise. In this way, osnoise can account for any source of
	  interference. At the end of the period, the osnoise tracer prints
	  the sum of all noise, the max single noise, the percentage of CPU
	  available for the thread, and the counters for the noise sources.

	  In addition to the tracer, a set of tracepoints was added to
	  facilitate the identification of the osnoise source.

	  The output will appear in the trace and trace_pipe files.

	  To enable this tracer, echo "osnoise" into the current_tracer
	  file.
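
# A short sketch of running the hardware latency and OS noise tracers above,
# assuming tracefs is mounted at /sys/kernel/tracing (the threshold value is
# illustrative):
#
#   echo hwlat > /sys/kernel/tracing/current_tracer
#   echo 10 > /sys/kernel/tracing/tracing_thresh    # report latencies > 10 usecs
#   cat /sys/kernel/tracing/trace_pipe
#
#   echo osnoise > /sys/kernel/tracing/current_tracer
#   cat /sys/kernel/tracing/trace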

config TIMERLAT_TRACER
	bool "Timerlat tracer"
	select OSNOISE_TRACER
	select GENERIC_TRACER
	help
	  The timerlat tracer aims to help preemptive kernel developers
	  find sources of wakeup latencies of real-time threads.

	  The tracer creates a per-cpu kernel thread with real-time priority.
	  The tracer thread sets a periodic timer to wake itself up, and goes
	  to sleep waiting for the timer to fire. At the wakeup, the thread
	  then computes a wakeup latency value as the difference between
	  the current time and the absolute time that the timer was set
	  to expire.

	  The tracer prints two lines at every activation. The first is the
	  timer latency observed at the hardirq context before the
	  activation of the thread. The second is the timer latency observed
	  by the thread, which is the same level that cyclictest reports. The
	  ACTIVATION ID field serves to relate the irq execution to its
	  respective thread execution.

	  The tracer is built on top of the osnoise tracer, and the osnoise:
	  events can be used to trace the source of interference from NMI,
	  IRQs and other threads. It also enables the capture of the
	  stacktrace at the IRQ context, which helps to identify the code
	  path that can cause thread delay.

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks into various tracepoints in the kernel,
	  allowing the user to pick and choose which tracepoints they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using
	  the ftrace interface, e.g.:

	      echo 1 > /sys/kernel/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/tracing/per_cpu/cpu2/snapshot

	  After that, only the tracing buffer for CPU 2 is swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to
	  trace recording, as it needs some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER
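
# A brief sketch of using the syscall trace events provided by FTRACE_SYSCALLS
# above, assuming tracefs is mounted at /sys/kernel/tracing:
#
#   echo 1 > /sys/kernel/tracing/events/syscalls/enable
#   cat /sys/kernel/tracing/trace_pipe
#   echo 0 > /sys/kernel/tracing/events/syscalls/enable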

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  Branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	    /sys/kernel/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded whether it was hit or missed.
	  The results will be displayed in:

	    /sys/kernel/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	      git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	      echo 1 > /sys/block/sda/sda1/trace/enable
	      echo blk > /sys/kernel/tracing/current_tracer
	      cat /sys/kernel/tracing/trace_pipe

	  If unsure, say N.

config FPROBE_EVENTS
	depends on FPROBE
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable fprobe-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events on function entry and
	  exit via the ftrace interface. The syntax is the same as for kprobe
	  events, and kprobe events on function entry and exit are
	  transparently converted to these fprobe events.

config PROBE_EVENTS_BTF_ARGS
	depends on HAVE_FUNCTION_ARG_ACCESS_API
	depends on FPROBE_EVENTS || KPROBE_EVENTS
	depends on DEBUG_INFO_BTF && BPF_SYSCALL
	bool "Support BTF function arguments for probe events"
	default y
	help
	  The user can specify the arguments of the probe event using the names
	  of the arguments of the probed function, when the probe location is a
	  kernel function entry or a tracepoint.
	  This is available only if BTF (BPF Type Format) support is enabled.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.

config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace functions from kprobe events"
	depends on KPROBE_EVENTS
	depends on DYNAMIC_FTRACE
	default n
	help
	  This is only for developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of breakpoints, ftrace-related
	  functions are protected from kprobe events to prevent infinite
	  recursion or any unexpected execution path which leads to a kernel
	  crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe subcommand
	  of perf tools on user space applications.

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe, uprobe, and
	  tracepoint events.
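
# A minimal sketch of adding a dynamic probe event through the kprobe event
# interface above, assuming tracefs is mounted at /sys/kernel/tracing; the
# probed function (do_sys_openat2) and event name (my_open) are illustrative:
#
#   echo 'p:my_open do_sys_openat2' >> /sys/kernel/tracing/kprobe_events
#   echo 1 > /sys/kernel/tracing/events/kprobes/my_open/enable
#   cat /sys/kernel/tracing/trace_pipe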

config DYNAMIC_EVENTS
	def_bool n

config PROBE_EVENTS
	def_bool n

config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	  Allows BPF to override the execution of a probed function and
	  set a different return value. This is used for error injection.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	bool
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_CC
	def_bool y
	depends on $(cc-option,-mrecord-mcount)
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_OBJTOOL
	def_bool y
	depends on HAVE_OBJTOOL_MCOUNT
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on FTRACE_MCOUNT_RECORD
	select OBJTOOL

config FTRACE_MCOUNT_USE_RECORDMCOUNT
	def_bool y
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on !FTRACE_MCOUNT_USE_OBJTOOL
	depends on FTRACE_MCOUNT_RECORD

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers. It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config SYNTH_EVENTS
	bool "Synthetic trace events"
	select TRACING
	select DYNAMIC_EVENTS
	default n
	help
	  Synthetic events are user-defined trace events that can be
	  used to combine data from other trace events or in fact any
	  data source. Synthetic events can be generated indirectly
	  via the trace() action of histogram triggers or directly
	  by way of an in-kernel API.

	  See Documentation/trace/events.rst or
	  Documentation/trace/histogram.rst for details and examples.

	  If in doubt, say N.

config USER_EVENTS
	bool "User trace events"
	select TRACING
	select DYNAMIC_EVENTS
	help
	  User trace events are user-defined trace events that
	  can be used like an existing kernel trace event. User trace
	  events are generated by writing to a tracefs file. User
	  processes can determine if their tracing events should be
	  generated by registering a value and bit with the kernel
	  that reflects when it is enabled or not.

	  See Documentation/trace/user_events.rst.
	  If in doubt, say N.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	select DYNAMIC_EVENTS
	select SYNTH_EVENTS
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file. They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.

	  See Documentation/trace/histogram.rst.
	  If in doubt, say N.
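
# A short sketch of a histogram trigger as provided by HIST_TRIGGERS above,
# assuming tracefs is mounted at /sys/kernel/tracing; the event and key used
# here are illustrative:
#
#   echo 'hist:keys=common_pid' > /sys/kernel/tracing/events/sched/sched_switch/trigger
#   cat /sys/kernel/tracing/events/sched/sched_switch/hist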

config TRACE_EVENT_INJECT
	bool "Trace event injection"
	depends on TRACING
	help
	  Allow user-space to inject a specific trace event into the ring
	  buffer. This is mainly used for testing purposes.

	  If unsure, say N.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_resched() to let other tasks
	  run), and calls the tracepoint. Each iteration will record the time
	  it took to write to the tracepoint, and on the next iteration that
	  data will be passed to the tracepoint itself. That is, the tracepoint
	  will report the time it took to do the previous tracepoint.
	  The string written to the tracepoint is a static string of 128 bytes
	  to keep the time the same. The initial string is simply a write of
	  "START". The second string records the cold cache time of the first
	  write, which is not added to the rest of the calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine because
	  we care most about hot paths that are probably in cache already.

	  An example of the output:

	      START
	      first=3672 [COLD CACHED]
	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space tools
	  that use this string to parse the raw data, as user space does not
	  know how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value. If this macro is used, then
	  the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeofs the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong to.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load.
	  With this option, they will not be freed, as
	  they are needed for the "eval_map" file. Enabling this option will
	  increase the memory footprint of the running kernel.

	  If unsure, say N.

config FTRACE_RECORD_RECURSION
	bool "Record functions that recurse in function tracing"
	depends on FUNCTION_TRACER
	help
	  All callbacks that attach to the function tracing have some sort
	  of protection against recursion. Even though the protection exists,
	  it adds overhead. This option will create a file in the tracefs
	  file system called "recursed_functions" that will list the functions
	  that triggered a recursion.

	  This will add more overhead to cases that have recursion.

	  If unsure, say N

config FTRACE_RECORD_RECURSION_SIZE
	int "Max number of recursed functions to record"
	default 128
	depends on FTRACE_RECORD_RECURSION
	help
	  This defines the limit of the number of functions that can be
	  listed in the "recursed_functions" file, which lists all
	  the functions that caused a recursion to happen.
	  This file can be reset, but the limit cannot be changed
	  at runtime.

config FTRACE_VALIDATE_RCU_IS_WATCHING
	bool "Validate RCU is on during ftrace execution"
	depends on FUNCTION_TRACER
	depends on ARCH_WANTS_NO_INSTR
	help
	  All callbacks that attach to the function tracing have some sort of
	  protection against recursion. This option is only to verify that
	  ftrace (and other users of ftrace_test_recursion_trylock()) are not
	  called outside of RCU, as if they are, it can cause a race. But it
	  also has a noticeable overhead when enabled.

	  If unsure, say N

config RING_BUFFER_RECORD_RECURSION
	bool "Record functions that recurse in the ring buffer"
	depends on FTRACE_RECORD_RECURSION
	# default y, because it is coupled with FTRACE_RECORD_RECURSION
	default y
	help
	  The ring buffer has its own internal recursion. Although when
	  recursion happens it won't cause harm because of the protection,
	  it does cause unwanted overhead. Enabling this option will record
	  the places where recursion was detected in the ftrace
	  "recursed_functions" file.

	  This will add more overhead to cases that have recursion.

config GCOV_PROFILE_FTRACE
	bool "Enable GCOV profiling on ftrace subsystem"
	depends on GCOV_KERNEL
	help
	  Enable GCOV profiling on the ftrace subsystem for checking
	  which functions/lines are tested.

	  If unsure, say N.

	  Note that on a kernel compiled with this config, ftrace will
	  run significantly slower.

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_STARTUP_TEST
	bool "Run selftest on trace events"
	depends on FTRACE_STARTUP_TEST
	default y
	help
	  This option performs a test on all trace events in the system.
	  It basically just enables each event and runs some code that
	  will trigger events (not necessarily the event it enables).
	  This may take some time to run, as there are a lot of events.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on EVENT_TRACE_STARTUP_TEST
	help
	  This option will also enable testing every syscall event.
	  It only enables each event, runs various loads with the event
	  enabled, and then disables it. This adds a bit more time to kernel
	  boot up, since it runs this on every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	  events

config FTRACE_SORT_STARTUP_TEST
	bool "Verify compile time sorting of ftrace functions"
	depends on DYNAMIC_FTRACE
	depends on BUILDTIME_MCOUNT_SORT
	help
	  Sorting of the mcount_loc section, which ftrace uses to know
	  where to patch functions for tracing and other callbacks, is done
	  at compile time. But if the sort is not done correctly, it will
	  cause non-deterministic failures. When this is set, the sorted
	  sections will be verified to be indeed sorted, and a warning will
	  be issued if they are not.

	  If unsure, say N

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test will start and kick off
	  a thread per cpu. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per cpu buffer: what
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N

config RING_BUFFER_VALIDATE_TIME_DELTAS
	bool "Verify ring buffer time stamp deltas"
	depends on RING_BUFFER
	help
	  This will audit the time stamps on the ring buffer sub
	  buffers to make sure that all the time deltas for the
	  events on a sub buffer match the current time stamp.
	  This audit is performed for every event that is not
	  interrupted, or interrupting another event. A check
	  is also made when traversing sub buffers to make sure
	  that all the deltas on the previous sub buffer do not
	  add up to be greater than the current time stamp.

	  NOTE: This adds significant overhead to recording of events,
	  and should only be used to test the logic of the ring buffer.
	  Do not use it on production systems.

	  Only say Y if you understand what this does, and you
	  still want it enabled. Otherwise say N

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config PREEMPTIRQ_DELAY_TEST
	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
	depends on m
	help
	  Select this option to build a test module that can help test latency
	  tracers by executing a preempt or irq disable section with a user
	  configurable delay. The module busy waits for the duration of the
	  critical section.

	  For example, the following invocation generates a burst of three
	  irq-disabled critical sections for 500us:
	  modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3

	  What's more, if you want to attach the test to the cpu on which the
	  latency tracer is running, specify cpu_affinity=cpu_num at the end
	  of the command.

	  If unsure, say N

config SYNTH_EVENT_GEN_TEST
	tristate "Test module for in-kernel synthetic event generation"
	depends on SYNTH_EVENTS && m
	help
	  This option creates a test module to check the base
	  functionality of in-kernel synthetic event definition and
	  generation.

	  To test, insert the module, and then check the trace buffer
	  for the generated sample events.

	  If unsure, say N.

config KPROBE_EVENT_GEN_TEST
	tristate "Test module for in-kernel kprobe event generation"
	depends on KPROBE_EVENTS && m
	help
	  This option creates a test module to check the base
	  functionality of in-kernel kprobe event definition.

	  To test, insert the module, and then check the trace buffer
	  for the generated kprobe events.

	  If unsure, say N.

config HIST_TRIGGERS_DEBUG
	bool "Hist trigger debug support"
	depends on HIST_TRIGGERS
	help
	  Add a "hist_debug" file for each event, which when read will
	  dump out a bunch of internal details about the hist triggers
	  defined on that event.

	  The hist_debug file serves a couple of purposes:

	    - Helps developers verify that nothing is broken.

	    - Provides educational information to support the details
	      of the hist trigger internals as described by
	      Documentation/trace/histogram-design.rst.

	  The hist_debug output only covers the data structures
	  related to the histogram definitions themselves and doesn't
	  display the internals of map buckets or variable values of
	  running histograms.

	  If unsure, say N.

source "kernel/trace/rv/Kconfig"

endif # FTRACE