# SPDX-License-Identifier: GPL-2.0-only
#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_RETHOOK
	bool

config RETHOOK
	bool
	depends on HAVE_RETHOOK
	help
	  Enable the generic return hooking feature. This is an internal
	  API, which will be used by other function-entry hooking
	  features like fprobe and kprobes.

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_RETVAL
	bool

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_ARGS
	bool
	help
	  If this is set, then the arguments and the stack can be found from
	  the ftrace_regs passed into the function callback regs parameter
	  by default, even without setting the REGS flag in the ftrace_ops.
	  This allows for use of ftrace_regs_get_argument() and
	  ftrace_regs_get_stack_pointer().

config HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
	bool
	help
	  An architecture selects this if it generates
	  __patchable_function_entries sections but does not want them
	  included in the ftrace locations.

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_NOP_MCOUNT
	bool
	help
	  Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount

config HAVE_OBJTOOL_MCOUNT
	bool
	help
	  Arch supports objtool --mcount

config HAVE_OBJTOOL_NOP_MCOUNT
	bool
	help
	  Arch supports the objtool options --mcount with --mnop.
	  An architecture can select this if it wants to enable nop'ing
	  of ftrace locations.

config HAVE_C_RECORDMCOUNT
	bool
	help
	  The C version of recordmcount is available.

config HAVE_BUILDTIME_MCOUNT_SORT
	bool
	help
	  An architecture selects this if it sorts the mcount_loc section
	  at build time.

config BUILDTIME_MCOUNT_SORT
	bool
	default y
	depends on HAVE_BUILDTIME_MCOUNT_SORT && DYNAMIC_FTRACE
	help
	  Sort the mcount_loc section at build time.

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

config PREEMPTIRQ_TRACEPOINTS
	bool
	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
	select TRACING
	default y
	help
	  Create preempt/irq toggle tracepoints if needed, so that other parts
	  of the kernel can use them to generate or add hooks to them.
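
# Illustrative sketch (not part of the build): an architecture advertises the
# capabilities above by selecting the corresponding HAVE_* symbols from its
# own arch/*/Kconfig entry. The architecture symbol below is hypothetical:
#
#	config EXAMPLE_ARCH
#		def_bool y
#		select HAVE_FUNCTION_TRACER
#		select HAVE_DYNAMIC_FTRACE
#		select HAVE_FTRACE_MCOUNT_RECORD
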
# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer), they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects them. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
# hiding of the automatic options.

config TRACING
	bool
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK
	select NEED_TASKS_RCU

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

menuconfig FTRACE
	bool "Tracers"
	depends on TRACING_SUPPORT
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config BOOTTIME_TRACING
	bool "Boot-time Tracing support"
	depends on TRACING
	select BOOT_CONFIG
	help
	  Enable developers to set up the ftrace subsystem via a supplemental
	  kernel cmdline at boot time for debugging (tracing) driver
	  initialization and the boot process.

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select NEED_TASKS_RCU
	select TASKS_RUDE_RCU
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function. This NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it is disabled at
	  runtime (the boot-up default), the overhead of the instructions is
	  very small and not measurable even in micro-benchmarks (at least
	  on x86, but it may have an impact on other architectures).

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry and
	  its return.
	  Its main purpose is to trace the duration of functions and
	  draw a call graph for each thread, with some information such as
	  the return value. This is done by saving the current return
	  address in a stack of calls on the current task structure.

config FUNCTION_GRAPH_RETVAL
	bool "Kernel Function Graph Return Value"
	depends on HAVE_FUNCTION_GRAPH_RETVAL
	depends on FUNCTION_GRAPH_TRACER
	default n
	help
	  Support recording and printing the function return value when
	  using the function graph tracer. It can be helpful to locate
	  functions that return errors. This feature is off by default, and
	  you can enable it via the trace option funcgraph-retval.
	  See Documentation/trace/ftrace.rst

config FUNCTION_GRAPH_RETADDR
	bool "Kernel Function Graph Return Address"
	depends on FUNCTION_GRAPH_TRACER
	default n
	help
	  Support recording and printing the function return address when
	  using the function graph tracer. It can be helpful to locate the
	  code line from which the function is called. This feature is off
	  by default, and you can enable it via the trace option
	  funcgraph-retaddr.
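
# Illustrative usage sketch (assumes tracefs is mounted at /sys/kernel/tracing):
# enable the function graph tracer and, when FUNCTION_GRAPH_RETVAL is built in,
# turn on return-value reporting before reading the trace:
#
#	echo function_graph > /sys/kernel/tracing/current_tracer
#	echo 1 > /sys/kernel/tracing/options/funcgraph-retval
#	cat /sys/kernel/tracing/trace
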
config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. At
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect the
	  performance of the system.

	  See the files in /sys/kernel/tracing:

	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	def_bool y
	depends on DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS
	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config DYNAMIC_FTRACE_WITH_CALL_OPS
	def_bool y
	depends on HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS

config DYNAMIC_FTRACE_WITH_ARGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS

config FPROBE
	bool "Kernel Function Probe (fprobe)"
	depends on FUNCTION_TRACER
	depends on DYNAMIC_FTRACE_WITH_REGS
	depends on HAVE_RETHOOK
	select RETHOOK
	default n
	help
	  This option enables the kernel function probe (fprobe) based on
	  ftrace. The fprobe is similar to kprobes, but probes only kernel
	  function entries and exits. A single fprobe can also probe
	  multiple functions.

	  If unsure, say N.

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled

	  Say N if unsure.

config TRACE_PREEMPT_TOGGLE
	bool
	help
	  Enables hooks which will be called when preemption is first disabled,
	  and last enabled.
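
# Illustrative usage sketch (assumes tracefs is mounted at /sys/kernel/tracing
# and DYNAMIC_FTRACE is enabled): restrict function tracing to a subset of
# functions before starting the function tracer. The 'vfs_*' pattern is only
# an example:
#
#	echo 'vfs_*' > /sys/kernel/tracing/set_ftrace_filter
#	echo function > /sys/kernel/tracing/current_tracer
#	cat /sys/kernel/tracing/trace
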
config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on PREEMPTION
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	select TRACE_PREEMPT_TOGGLE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  When enabled, this tracer creates one or more kernel threads,
	  depending on what the cpumask file is set to, with each thread
	  spinning in a loop looking for interruptions caused by
	  something other than the kernel. For example, if a
	  System Management Interrupt (SMI) takes a noticeable amount of
	  time, this tracer will detect it. This is useful for testing
	  whether a system is reliable for Real Time tasks.

	  Some files are created in the tracing directory when this
	  is enabled:

	    hwlat_detector/width  - time in usecs for how long to spin for
	    hwlat_detector/window - time in usecs between the start of each
				    iteration

	  A kernel thread is created that will spin with interrupts disabled
	  for "width" microseconds in every "window" cycle. It will not spin
	  for "window - width" microseconds, where the system can
	  continue to operate.

	  The output will appear in the trace and trace_pipe files.

	  When the tracer is not running, it has no effect on the system,
	  but when it is running, it can cause the system to be
	  periodically non-responsive. Do not run this tracer on a
	  production system.

	  To enable this tracer, echo "hwlat" into the current_tracer
	  file. Every time a latency is greater than tracing_thresh, it will
	  be recorded into the ring buffer.
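
# Illustrative usage sketch (assumes tracefs is mounted at /sys/kernel/tracing
# and IRQSOFF_TRACER is enabled): start a fresh maximum search for the longest
# irqs-off section and read the result back later:
#
#	echo irqsoff > /sys/kernel/tracing/current_tracer
#	echo 0 > /sys/kernel/tracing/tracing_max_latency
#	cat /sys/kernel/tracing/tracing_max_latency
#	cat /sys/kernel/tracing/trace
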
config OSNOISE_TRACER
	bool "OS Noise tracer"
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  In the context of high-performance computing (HPC), Operating
	  System Noise (osnoise) refers to the interference experienced by an
	  application due to activities inside the operating system. In the
	  context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
	  can cause noise to the system. Moreover, hardware-related jobs can
	  also cause noise, for example, via SMIs.

	  The osnoise tracer leverages the hwlat_detector by running a similar
	  loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
	  the sources of osnoise during its execution. The osnoise tracer takes
	  note of the entry and exit point of any source of interference,
	  increasing a per-cpu interference counter. It saves an interference
	  counter for each source of interference. The interference counter for
	  NMI, IRQs, SoftIRQs, and threads is increased any time the tool
	  observes these interferences' entry events. When noise happens
	  without any interference from the operating system level, the
	  hardware noise counter increases, pointing to a hardware-related
	  noise. In this way, osnoise can account for any source of
	  interference. At the end of the period, the osnoise tracer prints
	  the sum of all noise, the max single noise, the percentage of CPU
	  available for the thread, and the counters for the noise sources.

	  In addition to the tracer, a set of tracepoints were added to
	  facilitate the identification of the osnoise source.

	  The output will appear in the trace and trace_pipe files.

	  To enable this tracer, echo "osnoise" into the current_tracer
	  file.

config TIMERLAT_TRACER
	bool "Timerlat tracer"
	select OSNOISE_TRACER
	select GENERIC_TRACER
	help
	  The timerlat tracer aims to help developers of the preemptive
	  kernel find sources of wakeup latencies of real-time threads.

	  The tracer creates a per-cpu kernel thread with real-time priority.
	  The tracer thread sets a periodic timer to wake itself up, and goes
	  to sleep waiting for the timer to fire. At the wakeup, the thread
	  then computes a wakeup latency value as the difference between
	  the current time and the absolute time that the timer was set
	  to expire.

	  The tracer prints two lines at every activation. The first is the
	  timer latency observed at the hardirq context before the
	  activation of the thread. The second is the timer latency observed
	  by the thread, which is the same level that cyclictest reports. The
	  ACTIVATION ID field serves to relate the irq execution to its
	  respective thread execution.

	  The tracer is built on top of the osnoise tracer, and the osnoise:
	  events can be used to trace the source of interference from NMI,
	  IRQs and other threads. It also enables the capture of the
	  stacktrace at the IRQ context, which helps to identify the code
	  path that can cause thread delay.

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.
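
# Illustrative usage sketch (assumes tracefs is mounted at /sys/kernel/tracing
# and OSNOISE_TRACER is enabled): run the osnoise tracer and read the per-cpu
# noise summary from the trace file:
#
#	echo osnoise > /sys/kernel/tracing/current_tracer
#	cat /sys/kernel/tracing/trace
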
config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using
	  the ftrace interface, e.g.:

	      echo 1 > /sys/kernel/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/tracing/per_cpu/cpu2/snapshot

	  After which, only the tracing buffer for CPU 2 is swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  Branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded, whether it hit or missed.
	  The results will be displayed in:

	  /sys/kernel/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. It should only be enabled when the system
	  is to be analyzed in much detail.
endchoice
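
# Illustrative usage sketch (assumes tracefs is mounted at /sys/kernel/tracing):
# with PROFILE_ANNOTATED_BRANCHES (or PROFILE_ALL_BRANCHES) enabled, the
# collected statistics can be read from the trace_stat directory:
#
#	cat /sys/kernel/tracing/trace_stat/branch_annotated
#	cat /sys/kernel/tracing/trace_stat/branch_all
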
config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/tracing/current_tracer
	    cat /sys/kernel/tracing/trace_pipe

	  If unsure, say N.

config FPROBE_EVENTS
	depends on FPROBE
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable fprobe-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events on function entry and
	  exit via the ftrace interface. The syntax is the same as for kprobe
	  events, and kprobe events on function entry and exit will be
	  transparently converted to fprobe events.

config PROBE_EVENTS_BTF_ARGS
	depends on HAVE_FUNCTION_ARG_ACCESS_API
	depends on FPROBE_EVENTS || KPROBE_EVENTS
	depends on DEBUG_INFO_BTF && BPF_SYSCALL
	bool "Support BTF function arguments for probe events"
	default y
	help
	  The user can specify the arguments of the probe event using the names
	  of the arguments of the probed function, when the probe location is a
	  kernel function entry or a tracepoint.
	  This is available only if BTF (BPF Type Format) support is enabled.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.
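
# Illustrative usage sketch (assumes tracefs is mounted at /sys/kernel/tracing
# and KPROBE_EVENTS is enabled): define a kprobe event on a kernel function
# (the probed symbol and event name below are just examples), enable it, and
# read the results:
#
#	echo 'p:myprobe vfs_read' >> /sys/kernel/tracing/kprobe_events
#	echo 1 > /sys/kernel/tracing/events/kprobes/myprobe/enable
#	cat /sys/kernel/tracing/trace_pipe
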
config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace functions from kprobe events"
	depends on KPROBE_EVENTS
	depends on DYNAMIC_FTRACE
	default n
	help
	  This is only for developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of a breakpoint, ftrace-related
	  functions are protected from kprobe events to prevent an infinite
	  recursion or any unexpected execution path which leads to a kernel
	  crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe
	  subcommand of perf tools on user space applications.

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe, uprobe, and
	  tracepoint events.

config DYNAMIC_EVENTS
	def_bool n

config PROBE_EVENTS
	def_bool n

config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	  Allows BPF to override the execution of a probed function and
	  set a different return value. This is used for error injection.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	bool
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_CC
	def_bool y
	depends on $(cc-option,-mrecord-mcount)
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_OBJTOOL
	def_bool y
	depends on HAVE_OBJTOOL_MCOUNT
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on FTRACE_MCOUNT_RECORD
	select OBJTOOL

config FTRACE_MCOUNT_USE_RECORDMCOUNT
	def_bool y
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on !FTRACE_MCOUNT_USE_OBJTOOL
	depends on FTRACE_MCOUNT_RECORD

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers. It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config SYNTH_EVENTS
	bool "Synthetic trace events"
	select TRACING
	select DYNAMIC_EVENTS
	default n
	help
	  Synthetic events are user-defined trace events that can be
	  used to combine data from other trace events or in fact any
	  data source. Synthetic events can be generated indirectly
	  via the trace() action of histogram triggers or directly
	  by way of an in-kernel API.

	  See Documentation/trace/events.rst or
	  Documentation/trace/histogram.rst for details and examples.

	  If in doubt, say N.
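
# Illustrative usage sketch (assumes tracefs is mounted at /sys/kernel/tracing
# and SYNTH_EVENTS is enabled): define a synthetic event with two fields and
# check the resulting event format. The event name and fields follow the
# example in Documentation/trace/histogram.rst:
#
#	echo 'wakeup_latency u64 lat; pid_t pid' >> /sys/kernel/tracing/synthetic_events
#	cat /sys/kernel/tracing/events/synthetic/wakeup_latency/format
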
config USER_EVENTS
	bool "User trace events"
	select TRACING
	select DYNAMIC_EVENTS
	help
	  User trace events are user-defined trace events that
	  can be used like an existing kernel trace event. User trace
	  events are generated by writing to a tracefs file. User
	  processes can determine if their tracing events should be
	  generated by registering a value and bit with the kernel
	  that reflects when it is enabled or not.

	  See Documentation/trace/user_events.rst.
	  If in doubt, say N.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	select DYNAMIC_EVENTS
	select SYNTH_EVENTS
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file. They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.

	  See Documentation/trace/histogram.rst.
	  If in doubt, say N.

config TRACE_EVENT_INJECT
	bool "Trace event injection"
	depends on TRACING
	help
	  Allow user-space to inject a specific trace event into the ring
	  buffer. This is mainly used for testing purposes.

	  If unsure, say N.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_resched() to let other tasks
	  run), and calls the tracepoint. Each iteration will record the time
	  it took to write to the tracepoint, and on the next iteration that
	  data will be passed to the tracepoint itself. That is, the tracepoint
	  will report the time it took to do the previous tracepoint.
	  The string written to the tracepoint is a static string of 128 bytes
	  to keep the time the same. The initial string is simply a write of
	  "START". The second string records the cold cache time of the first
	  write, which is not added to the rest of the calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine because
	  we care most about hot paths that are probably in cache already.

	  An example of the output:

	      START
	      first=3672 [COLD CACHED]
	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666
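
# Illustrative usage sketch for HIST_TRIGGERS above (assumes tracefs is mounted
# at /sys/kernel/tracing): aggregate kmalloc events by call site and read the
# resulting histogram, following Documentation/trace/histogram.rst:
#
#	echo 'hist:key=call_site:val=bytes_req' > /sys/kernel/tracing/events/kmem/kmalloc/trigger
#	cat /sys/kernel/tracing/events/kmem/kmalloc/hist
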
config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark
	  it. It creates its own ring buffer such that it will not interfere
	  with any other users of the ring buffer (such as ftrace). It then
	  creates a producer and consumer that will run for 10 seconds and
	  sleep for 10 seconds. Each interval it will print out the number
	  of events it recorded and give a rough estimate of how long each
	  iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space tools
	  that use this string to parse the raw data, as user space does not
	  know how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value. If this macro is used, then
	  the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeofs the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong to.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed, as
	  they are needed for the "eval_map" file. Enabling this option will
	  increase the memory footprint of the running kernel.

	  If unsure, say N.

config FTRACE_RECORD_RECURSION
	bool "Record functions that recurse in function tracing"
	depends on FUNCTION_TRACER
	help
	  All callbacks that attach to function tracing have some sort
	  of protection against recursion. Even though the protection exists,
	  it adds overhead. This option will create a file in the tracefs
	  file system called "recursed_functions" that will list the functions
	  that triggered a recursion.

	  This will add more overhead to cases that have recursion.

	  If unsure, say N.

config FTRACE_RECORD_RECURSION_SIZE
	int "Max number of recursed functions to record"
	default 128
	depends on FTRACE_RECORD_RECURSION
	help
	  This defines the limit on the number of functions that can be
	  listed in the "recursed_functions" file, which lists all
	  the functions that caused a recursion to happen.
	  This file can be reset, but the limit cannot be changed at
	  runtime.

config FTRACE_VALIDATE_RCU_IS_WATCHING
	bool "Validate RCU is on during ftrace execution"
	depends on FUNCTION_TRACER
	depends on ARCH_WANTS_NO_INSTR
	help
	  All callbacks that attach to function tracing have some sort of
	  protection against recursion. This option is only to verify that
	  ftrace (and other users of ftrace_test_recursion_trylock()) are not
	  called outside of RCU, as if they are, it can cause a race. But it
	  also has a noticeable overhead when enabled.

	  If unsure, say N.

config RING_BUFFER_RECORD_RECURSION
	bool "Record functions that recurse in the ring buffer"
	depends on FTRACE_RECORD_RECURSION
	# default y, because it is coupled with FTRACE_RECORD_RECURSION
	default y
	help
	  The ring buffer has its own internal recursion protection. Although
	  recursion will not cause harm because of that protection, it does
	  cause unwanted overhead. Enabling this option will record the
	  places where recursion was detected in the ftrace
	  "recursed_functions" file.

	  This will add more overhead to cases that have recursion.

config GCOV_PROFILE_FTRACE
	bool "Enable GCOV profiling on ftrace subsystem"
	depends on GCOV_KERNEL
	help
	  Enable GCOV profiling on the ftrace subsystem for checking
	  which functions/lines are tested.

	  If unsure, say N.

	  Note that on a kernel compiled with this config, ftrace will
	  run significantly slower.

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_STARTUP_TEST
	bool "Run selftest on trace events"
	depends on FTRACE_STARTUP_TEST
	default y
	help
	  This option performs a test on all trace events in the system.
	  It basically just enables each event and runs some code that
	  will trigger events (not necessarily the event it enables).
	  This may take some time to run, as there are a lot of events.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on EVENT_TRACE_STARTUP_TEST
	help
	  This option will also enable testing every syscall event.
	  It only enables the event, disables it, and runs various loads
	  with the event enabled. This adds a bit more time for kernel boot
	  up since it runs this on every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	  events

config FTRACE_SORT_STARTUP_TEST
	bool "Verify compile time sorting of ftrace functions"
	depends on DYNAMIC_FTRACE
	depends on BUILDTIME_MCOUNT_SORT
	help
	  Sorting of the mcount_loc section, which ftrace uses to know
	  where to patch functions for tracing and other callbacks, is
	  done at compile time. But if the sort is not done correctly,
	  it will cause non-deterministic failures. When this is set,
	  the sorted sections will be verified to be indeed sorted, and
	  a warning will be issued if they are not.

	  If unsure, say N.

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test will start, kicking off
	  a thread per cpu. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per cpu buffer: what
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N.
config RING_BUFFER_VALIDATE_TIME_DELTAS
	bool "Verify ring buffer time stamp deltas"
	depends on RING_BUFFER
	help
	  This will audit the time stamps on the ring buffer sub
	  buffer to make sure that all the time deltas for the
	  events on a sub buffer match the current time stamp.
	  This audit is performed for every event that is neither
	  interrupted by, nor interrupting, another event. A check
	  is also made when traversing sub buffers to make sure
	  that all the deltas on the previous sub buffer do not
	  add up to be greater than the current time stamp.

	  NOTE: This adds significant overhead to recording of events,
	  and should only be used to test the logic of the ring buffer.
	  Do not use it on production systems.

	  Only say Y if you understand what this does, and you
	  still want it enabled. Otherwise say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config PREEMPTIRQ_DELAY_TEST
	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
	depends on m
	help
	  Select this option to build a test module that can help test latency
	  tracers by executing a preempt or irq disable section with a user
	  configurable delay. The module busy waits for the duration of the
	  critical section.

	  For example, the following invocation generates a burst of three
	  irq-disabled critical sections for 500us:
	  modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3

	  What's more, if you want to run the test on the CPU that the latency
	  tracer is running on, specify cpu_affinity=cpu_num at the end of the
	  command.

	  If unsure, say N.

config SYNTH_EVENT_GEN_TEST
	tristate "Test module for in-kernel synthetic event generation"
	depends on SYNTH_EVENTS && m
	help
	  This option creates a test module to check the base
	  functionality of in-kernel synthetic event definition and
	  generation.

	  To test, insert the module, and then check the trace buffer
	  for the generated sample events.

	  If unsure, say N.

config KPROBE_EVENT_GEN_TEST
	tristate "Test module for in-kernel kprobe event generation"
	depends on KPROBE_EVENTS && m
	help
	  This option creates a test module to check the base
	  functionality of in-kernel kprobe event definition.

	  To test, insert the module, and then check the trace buffer
	  for the generated kprobe events.

	  If unsure, say N.

config HIST_TRIGGERS_DEBUG
	bool "Hist trigger debug support"
	depends on HIST_TRIGGERS
	help
	  Add a "hist_debug" file for each event, which when read will
	  dump out a bunch of internal details about the hist triggers
	  defined on that event.

	  The hist_debug file serves a couple of purposes:

	    - Helps developers verify that nothing is broken.

	    - Provides educational information to support the details
	      of the hist trigger internals as described by
	      Documentation/trace/histogram-design.rst.
	  The hist_debug output only covers the data structures
	  related to the histogram definitions themselves and doesn't
	  display the internals of map buckets or variable values of
	  running histograms.

	  If unsure, say N.

source "kernel/trace/rv/Kconfig"

endif # FTRACE