# SPDX-License-Identifier: GPL-2.0-only
#
# Architectures that offer a FUNCTION_TRACER implementation should
# select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_RETHOOK
	bool

config RETHOOK
	bool
	depends on HAVE_RETHOOK
	help
	  Enable the generic return hooking feature. This is an internal
	  API, which will be used by other function-entry hooking
	  features like fprobe and kprobes.

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_FREGS
	bool

config HAVE_FTRACE_GRAPH_FUNC
	bool
	help
	  True if ftrace_graph_func() is defined.

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_ARGS
	bool
	help
	  If this is set, then arguments and the stack can be found from
	  the ftrace_regs passed into the function callback regs parameter
	  by default, even without setting the REGS flag in the ftrace_ops.
	  This allows for use of ftrace_regs_get_argument() and
	  ftrace_regs_get_stack_pointer().

config HAVE_FTRACE_REGS_HAVING_PT_REGS
	bool
	help
	  If this is set, ftrace_regs contains pt_regs, and thus can be
	  converted to pt_regs without allocating memory.

config HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
	bool
	help
	  If the architecture generates __patchable_function_entries sections
	  but does not want them included in the ftrace locations.

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_NOP_MCOUNT
	bool
	help
	  Arch supports the gcc options -pg with -mrecord-mcount and -mnop-mcount

config HAVE_OBJTOOL_MCOUNT
	bool
	help
	  Arch supports objtool --mcount

config HAVE_OBJTOOL_NOP_MCOUNT
	bool
	help
	  Arch supports the objtool options --mcount with --mnop.
	  An architecture can select this if it wants to enable nop'ing
	  of ftrace locations.

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config HAVE_BUILDTIME_MCOUNT_SORT
	bool
	help
	  An architecture selects this if it sorts the mcount_loc section
	  at build time.

config BUILDTIME_MCOUNT_SORT
	bool
	default y
	depends on HAVE_BUILDTIME_MCOUNT_SORT && DYNAMIC_FTRACE
	help
	  Sort the mcount_loc section at build time.

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu().
	  Adds a very slight overhead to tracing when enabled.

config PREEMPTIRQ_TRACEPOINTS
	bool
	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
	select TRACING
	default y
	help
	  Create preempt/irq toggle tracepoints if needed, so that other parts
	  of the kernel can use them to generate or add hooks to them.

# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects them. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
# hiding of the automatic options.

config TRACING
	bool
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK
	select NEED_TASKS_RCU

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

menuconfig FTRACE
	bool "Tracers"
	depends on TRACING_SUPPORT
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config BOOTTIME_TRACING
	bool "Boot-time Tracing support"
	depends on TRACING
	select BOOT_CONFIG
	help
	  Enable developers to set up the ftrace subsystem via a supplemental
	  kernel command line at boot time, for debugging (tracing) driver
	  initialization and the boot process.

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select NEED_TASKS_RCU
	select TASKS_RUDE_RCU
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function; this NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it is disabled at
	  runtime (the bootup default), the overhead of the instructions is
	  very small and not measurable even in micro-benchmarks (at least
	  on x86, though it may have an impact on other architectures).

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry
	  and its return.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread, with some information such as
	  the return value. This is done by saving the current return
	  address in a stack of calls kept on the current task structure.

config FUNCTION_GRAPH_RETVAL
	bool "Kernel Function Graph Return Value"
	depends on HAVE_FUNCTION_GRAPH_FREGS
	depends on FUNCTION_GRAPH_TRACER
	default n
	help
	  Support recording and printing the function return value when
	  using the function graph tracer. It can be helpful to locate
	  functions that return errors. This feature is off by default, and
	  you can enable it via the trace option funcgraph-retval.
	  See Documentation/trace/ftrace.rst
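
	  For example, assuming tracefs is mounted at /sys/kernel/tracing
	  (the usual location), the option can be turned on at run time with
	  something like:

	    echo function_graph > /sys/kernel/tracing/current_tracer
	    echo 1 > /sys/kernel/tracing/options/funcgraph-retval
	    cat /sys/kernel/tracing/trace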

config FUNCTION_GRAPH_RETADDR
	bool "Kernel Function Graph Return Address"
	depends on FUNCTION_GRAPH_TRACER
	default n
	help
	  Support recording and printing the function return address when
	  using the function graph tracer. It can be helpful to locate the
	  code line from which the function is called. This feature is off
	  by default, and you can enable it via the trace option
	  funcgraph-retaddr.

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect the
	  performance of the system.

	  See the files in /sys/kernel/tracing:
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.
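
	  For example, assuming tracefs is mounted at /sys/kernel/tracing,
	  tracing can be restricted to a few functions with something like:

	    echo vfs_read > /sys/kernel/tracing/set_ftrace_filter
	    echo vfs_write >> /sys/kernel/tracing/set_ftrace_filter
	    echo function > /sys/kernel/tracing/current_tracer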

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	def_bool y
	depends on DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS
	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config DYNAMIC_FTRACE_WITH_CALL_OPS
	def_bool y
	depends on HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS

config DYNAMIC_FTRACE_WITH_ARGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS

config FPROBE
	bool "Kernel Function Probe (fprobe)"
	depends on HAVE_FUNCTION_GRAPH_FREGS && HAVE_FTRACE_GRAPH_FUNC
	depends on DYNAMIC_FTRACE_WITH_ARGS
	select FUNCTION_GRAPH_TRACER
	default n
	help
	  This option enables the kernel function probe (fprobe), which is
	  based on ftrace. An fprobe is similar to a kprobe, but it probes
	  only kernel function entries and exits. A single fprobe can also
	  probe multiple functions.

	  If unsure, say N.

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in the tracing directory called function_profile_enabled, which
	  defaults to zero. When a 1 is echoed into this file profiling
	  begins, and when a zero is entered, profiling stops. A "functions"
	  file is created in the trace_stat directory; this file shows the
	  list of functions that have been hit and their counters.

	  If in doubt, say N.
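
	  For example, assuming tracefs is mounted at /sys/kernel/tracing,
	  a profiling run could look like (using a glob in case the
	  trace_stat files are split per CPU):

	    echo 1 > /sys/kernel/tracing/function_profile_enabled
	    # run the workload of interest here
	    echo 0 > /sys/kernel/tracing/function_profile_enabled
	    cat /sys/kernel/tracing/trace_stat/function*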

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled
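
	  For example, assuming tracefs is mounted at /sys/kernel/tracing,
	  the deepest stack seen so far can be inspected with:

	    sysctl kernel.stack_tracer_enabled=1
	    cat /sys/kernel/tracing/stack_trace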

	  Say N if unsure.

config TRACE_PREEMPT_TOGGLE
	bool
	help
	  Enables hooks which will be called when preemption is first disabled,
	  and last enabled.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at runtime
	  via:

	    echo 0 > /sys/kernel/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on PREEMPTION
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	select TRACE_PREEMPT_TOGGLE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at runtime
	  via:

	    echo 0 > /sys/kernel/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  When enabled, this tracer will create one or more kernel threads,
	  depending on what the cpumask file is set to, with each thread
	  spinning in a loop looking for interruptions caused by
	  something other than the kernel. For example, if a
	  System Management Interrupt (SMI) takes a noticeable amount of
	  time, this tracer will detect it. This is useful for testing
	  if a system is reliable for Real Time tasks.

	  Some files are created in the tracing directory when this
	  is enabled:

	    hwlat_detector/width - time in usecs for how long to spin for
	    hwlat_detector/window - time in usecs between the start of each
				    iteration

	  A kernel thread is created that will spin with interrupts disabled
	  for "width" microseconds in every "window" cycle. It will not spin
	  for "window - width" microseconds, where the system can
	  continue to operate.

	  The output will appear in the trace and trace_pipe files.

	  When the tracer is not running, it has no effect on the system,
	  but when it is running, it can cause the system to be
	  periodically non-responsive. Do not run this tracer on a
	  production system.

	  To enable this tracer, echo "hwlat" into the current_tracer
	  file. Every time a latency is greater than tracing_thresh, it will
	  be recorded into the ring buffer.
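
	  For example, assuming tracefs is mounted at /sys/kernel/tracing:

	    echo hwlat > /sys/kernel/tracing/current_tracer
	    cat /sys/kernel/tracing/trace_pipe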

config OSNOISE_TRACER
	bool "OS Noise tracer"
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  In the context of high-performance computing (HPC), Operating
	  System Noise (osnoise) refers to the interference experienced by an
	  application due to activities inside the operating system. In the
	  context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
	  can cause noise to the system. Moreover, hardware-related jobs can
	  also cause noise, for example, via SMIs.

	  The osnoise tracer leverages the hwlat_detector by running a similar
	  loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
	  the sources of osnoise to occur during its execution. The osnoise
	  tracer takes note of the entry and exit point of any source of
	  interference, increasing a per-cpu interference counter. It saves
	  an interference counter for each source of interference. The
	  interference counter for NMIs, IRQs, SoftIRQs, and threads is
	  increased any time the tracer observes these interferences' entry
	  events. When noise happens without any interference from the
	  operating system level, the hardware noise counter increases,
	  pointing to a hardware-related noise. In this way, osnoise can
	  account for any source of interference. At the end of the period,
	  the osnoise tracer prints the sum of all noise, the max single
	  noise, the percentage of CPU available for the thread, and the
	  counters for the noise sources.

	  In addition to the tracer, a set of tracepoints is provided to
	  facilitate the identification of the osnoise source.

	  The output will appear in the trace and trace_pipe files.

	  To enable this tracer, echo "osnoise" into the current_tracer
	  file.
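
	  For example, assuming tracefs is mounted at /sys/kernel/tracing,
	  the tracer and its companion tracepoints can be enabled with:

	    echo osnoise > /sys/kernel/tracing/current_tracer
	    echo 1 > /sys/kernel/tracing/events/osnoise/enable
	    cat /sys/kernel/tracing/trace_pipe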

config TIMERLAT_TRACER
	bool "Timerlat tracer"
	select OSNOISE_TRACER
	select GENERIC_TRACER
	help
	  The timerlat tracer aims to help preemptive kernel developers
	  find sources of wakeup latencies of real-time threads.

	  The tracer creates a per-cpu kernel thread with real-time priority.
	  The tracer thread sets a periodic timer to wake itself up, and goes
	  to sleep waiting for the timer to fire. At the wakeup, the thread
	  then computes a wakeup latency value as the difference between
	  the current time and the absolute time that the timer was set
	  to expire.

	  The tracer prints two lines at every activation. The first is the
	  timer latency observed at the hardirq context before the
	  activation of the thread. The second is the timer latency observed
	  by the thread, which is the same level that cyclictest reports. The
	  ACTIVATION ID field serves to relate the irq execution to its
	  respective thread execution.

	  The tracer is built on top of the osnoise tracer, and the osnoise:
	  events can be used to trace the source of interference from NMIs,
	  IRQs and other threads. It also enables the capture of the
	  stacktrace at the IRQ context, which helps to identify the code
	  path that can cause thread delay.

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks into various tracepoints in the kernel,
	  allowing the user to pick and choose which tracepoint they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.
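
	  For example, assuming tracefs is mounted at /sys/kernel/tracing,
	  all syscall events can be enabled with:

	    echo 1 > /sys/kernel/tracing/events/syscalls/enable
	    cat /sys/kernel/tracing/trace_pipe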

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using
	  the ftrace interface, e.g.:

	    echo 1 > /sys/kernel/tracing/snapshot
	    cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	    echo 1 > /sys/kernel/tracing/per_cpu/cpu2/snapshot

	  After this, only the tracing buffer for CPU 2 is swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  Branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	    /sys/kernel/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  executed in the kernel is recorded, whether it was taken or not.
	  The results will be displayed in:

	    /sys/kernel/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	    git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/tracing/current_tracer
	    cat /sys/kernel/tracing/trace_pipe

	  If unsure, say N.

config FPROBE_EVENTS
	depends on FPROBE
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable fprobe-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events on function entry and
	  exit via the ftrace interface. The syntax is the same as for kprobe
	  events, and kprobe events on function entry and exit will be
	  transparently converted to fprobe events.
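
	  For example, assuming tracefs is mounted at /sys/kernel/tracing,
	  an entry probe on vfs_read() (with an illustrative event name)
	  could be added with something like:

	    echo 'f:myread vfs_read' >> /sys/kernel/tracing/dynamic_events
	    echo 1 > /sys/kernel/tracing/events/fprobes/myread/enable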

config PROBE_EVENTS_BTF_ARGS
	depends on HAVE_FUNCTION_ARG_ACCESS_API
	depends on FPROBE_EVENTS || KPROBE_EVENTS
	depends on DEBUG_INFO_BTF && BPF_SYSCALL
	bool "Support BTF function arguments for probe events"
	default y
	help
	  The user can specify the arguments of the probe event using the names
	  of the arguments of the probed function, when the probe location is a
	  kernel function entry or a tracepoint.
	  This is available only if BTF (BPF Type Format) support is enabled.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by perf-probe subcommand of perf tools.
	  If you want to use perf tools, this option is strongly recommended.
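
	  For example, assuming tracefs is mounted at /sys/kernel/tracing,
	  a probe (with an illustrative name and target) can be added and
	  enabled with:

	    echo 'p:my_open do_sys_openat2' >> /sys/kernel/tracing/kprobe_events
	    echo 1 > /sys/kernel/tracing/events/kprobes/my_open/enable
	    cat /sys/kernel/tracing/trace_pipe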

config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace function from kprobe events"
	depends on KPROBE_EVENTS
	depends on DYNAMIC_FTRACE
	default n
	help
	  This is only for the developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of a breakpoint, ftrace-related
	  functions are protected from kprobe events to prevent an infinite
	  recursion or any unexpected execution path which leads to a kernel
	  crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace by itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe
	  subcommand of perf tools on user space applications.

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe, uprobe, and
	  tracepoint events.

config DYNAMIC_EVENTS
	def_bool n

config PROBE_EVENTS
	def_bool n

config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	  Allows BPF to override the execution of a probed function and
	  set a different return value. This is used for error injection.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	bool
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_CC
	def_bool y
	depends on $(cc-option,-mrecord-mcount)
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_OBJTOOL
	def_bool y
	depends on HAVE_OBJTOOL_MCOUNT
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on FTRACE_MCOUNT_RECORD
	select OBJTOOL

config FTRACE_MCOUNT_USE_RECORDMCOUNT
	def_bool y
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on !FTRACE_MCOUNT_USE_OBJTOOL
	depends on FTRACE_MCOUNT_RECORD

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers. It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config SYNTH_EVENTS
	bool "Synthetic trace events"
	select TRACING
	select DYNAMIC_EVENTS
	default n
	help
	  Synthetic events are user-defined trace events that can be
	  used to combine data from other trace events or in fact any
	  data source. Synthetic events can be generated indirectly
	  via the trace() action of histogram triggers or directly
	  by way of an in-kernel API.

	  See Documentation/trace/events.rst or
	  Documentation/trace/histogram.rst for details and examples.
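
	  For example, assuming tracefs is mounted at /sys/kernel/tracing,
	  a synthetic event can be defined from user space with something
	  like:

	    echo 'wakeup_latency u64 lat; pid_t pid' >> /sys/kernel/tracing/synthetic_events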

	  If in doubt, say N.

config USER_EVENTS
	bool "User trace events"
	select TRACING
	select DYNAMIC_EVENTS
	help
	  User trace events are user-defined trace events that
	  can be used like an existing kernel trace event. User trace
	  events are generated by writing to a tracefs file. User
	  processes can determine if their tracing events should be
	  generated by registering a value and bit with the kernel
	  that reflect whether the event is enabled or not.

	  See Documentation/trace/user_events.rst.
	  If in doubt, say N.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	select DYNAMIC_EVENTS
	select SYNTH_EVENTS
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped out by
	  reading a debugfs/tracefs file. They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.
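
	  For example, from within a tracefs mount (usually
	  /sys/kernel/tracing), kmalloc requests could be aggregated by
	  call site with:

	    echo 'hist:keys=call_site:values=bytes_req' > events/kmem/kmalloc/trigger
	    cat events/kmem/kmalloc/hist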

	  See Documentation/trace/histogram.rst.
	  If in doubt, say N.

config TRACE_EVENT_INJECT
	bool "Trace event injection"
	depends on TRACING
	help
	  Allow user-space to inject a specific trace event into the ring
	  buffer. This is mainly used for testing purposes.

	  If unsure, say N.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_resched() to let other tasks
	  run), and calls the tracepoint. Each iteration will record the time
	  it took to write to the tracepoint, and on the next iteration that
	  data will be passed to the tracepoint itself. That is, the tracepoint
	  will report the time it took to do the previous tracepoint.
	  The string written to the tracepoint is a static string of 128 bytes
	  to keep the time the same. The initial string is simply a write of
	  "START". The second string records the cold cache time of the first
	  write, which is not added to the rest of the calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine because
	  we care most about hot paths that are probably in cache already.

	  An example of the output:

	    START
	    first=3672 [COLD CACHED]
	    last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	    last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	    last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	    last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	    last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	    last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space tools
	  that use this string to parse the raw data, as user space does not
	  know how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value. If this macro is used, then
	  the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeof the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong to.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed, as
	  they are needed for the "eval_map" file. Enabling this option will
	  increase the memory footprint of the running kernel.

	  If unsure, say N.

config FTRACE_RECORD_RECURSION
	bool "Record functions that recurse in function tracing"
	depends on FUNCTION_TRACER
	help
	  All callbacks that attach to the function tracing have some sort
	  of protection against recursion. Even though the protection exists,
	  it adds overhead. This option will create a file in the tracefs
	  file system called "recursed_functions" that will list the functions
	  that triggered a recursion.

	  This will add more overhead to cases that have recursion.

	  If unsure, say N

config FTRACE_RECORD_RECURSION_SIZE
	int "Max number of recursed functions to record"
	default 128
	depends on FTRACE_RECORD_RECURSION
	help
	  This defines the limit on the number of functions that can be
	  listed in the "recursed_functions" file, which lists all
	  the functions that caused a recursion to happen.
	  This file can be reset, but the limit cannot be changed
	  at runtime.

config FTRACE_VALIDATE_RCU_IS_WATCHING
	bool "Validate RCU is on during ftrace execution"
	depends on FUNCTION_TRACER
	depends on ARCH_WANTS_NO_INSTR
	help
	  All callbacks that attach to the function tracing have some sort of
	  protection against recursion. This option only verifies that
	  ftrace (and other users of ftrace_test_recursion_trylock()) is not
	  called outside of RCU, as doing so can cause a race. Note that it
	  also adds a noticeable overhead when enabled.

	  If unsure, say N

config RING_BUFFER_RECORD_RECURSION
	bool "Record functions that recurse in the ring buffer"
	depends on FTRACE_RECORD_RECURSION
	# default y, because it is coupled with FTRACE_RECORD_RECURSION
	default y
	help
	  The ring buffer has its own internal recursion. Although recursion
	  does not cause harm when it happens, because of the protection,
	  it does cause unwanted overhead. Enabling this option will record
	  the places where recursion was detected in the ftrace
	  "recursed_functions" file.

	  This will add more overhead to cases that have recursion.

config GCOV_PROFILE_FTRACE
	bool "Enable GCOV profiling on ftrace subsystem"
	depends on GCOV_KERNEL
	help
	  Enable GCOV profiling on the ftrace subsystem for checking
	  which functions/lines are tested.

	  If unsure, say N.

	  Note that on a kernel compiled with this config, ftrace will
	  run significantly slower.

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_STARTUP_TEST
	bool "Run selftest on trace events"
	depends on FTRACE_STARTUP_TEST
	default y
	help
	  This option performs a test on all trace events in the system.
	  It basically just enables each event and runs some code that
	  will trigger events (not necessarily the event it enables).
	  This may take some time to run as there are a lot of events.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on EVENT_TRACE_STARTUP_TEST
	help
	  This option will also enable testing every syscall event.
	  It simply enables each event, runs various loads with the event
	  enabled, and then disables it. This adds a bit more time to kernel
	  boot up since it does this for every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	  events

config FTRACE_SORT_STARTUP_TEST
	bool "Verify compile time sorting of ftrace functions"
	depends on DYNAMIC_FTRACE
	depends on BUILDTIME_MCOUNT_SORT
	help
	  Sorting of the mcount_loc section, which ftrace uses to find
	  where to patch functions for tracing and other callbacks, is
	  done at compile time. But if the sort is not done correctly,
	  it will cause non-deterministic failures. When this is set, the
	  sorted sections will be verified to be indeed sorted, and a
	  warning will be issued if they are not.

	  If unsure, say N

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, a test will start that kicks off
	  a thread per CPU. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per-CPU buffer: what
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N

config RING_BUFFER_VALIDATE_TIME_DELTAS
	bool "Verify ring buffer time stamp deltas"
	depends on RING_BUFFER
	help
	  This will audit the time stamps on the ring buffer sub
	  buffers to make sure that all the time deltas for the
	  events on a sub buffer match the current time stamp.
	  This audit is performed for every event that is not
	  interrupted, or interrupting another event. A check
	  is also made when traversing sub buffers to make sure
	  that all the deltas on the previous sub buffer do not
	  add up to be greater than the current time stamp.

	  NOTE: This adds significant overhead to recording of events,
	  and should only be used to test the logic of the ring buffer.
	  Do not use it on production systems.

	  Only say Y if you understand what this does, and you
	  still want it enabled. Otherwise say N

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config PREEMPTIRQ_DELAY_TEST
	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
	depends on m
	help
	  Select this option to build a test module that can help test latency
	  tracers by executing a preempt or irq disable section with a user
	  configurable delay. The module busy waits for the duration of the
	  critical section.

	  For example, the following invocation generates a burst of three
	  irq-disabled critical sections for 500us:

	    modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3

	  What's more, if you want to attach the test to the CPU on which the
	  latency tracer is running, specify cpu_affinity=cpu_num at the end
	  of the command.

	  If unsure, say N

config SYNTH_EVENT_GEN_TEST
	tristate "Test module for in-kernel synthetic event generation"
	depends on SYNTH_EVENTS && m
	help
	  This option creates a test module to check the base
	  functionality of in-kernel synthetic event definition and
	  generation.

	  To test, insert the module, and then check the trace buffer
	  for the generated sample events.

	  If unsure, say N.

config KPROBE_EVENT_GEN_TEST
	tristate "Test module for in-kernel kprobe event generation"
	depends on KPROBE_EVENTS && m
	help
	  This option creates a test module to check the base
	  functionality of in-kernel kprobe event definition.

	  To test, insert the module, and then check the trace buffer
	  for the generated kprobe events.

	  If unsure, say N.

config HIST_TRIGGERS_DEBUG
	bool "Hist trigger debug support"
	depends on HIST_TRIGGERS
	help
	  Add a "hist_debug" file for each event, which when read will
	  dump out a bunch of internal details about the hist triggers
	  defined on that event.

	  The hist_debug file serves a couple of purposes:

	    - Helps developers verify that nothing is broken.

	    - Provides educational information to support the details
	      of the hist trigger internals as described by
	      Documentation/trace/histogram-design.rst.

	  The hist_debug output only covers the data structures
	  related to the histogram definitions themselves and doesn't
	  display the internals of map buckets or variable values of
	  running histograms.

	  If unsure, say N.

source "kernel/trace/rv/Kconfig"

endif # FTRACE