# SPDX-License-Identifier: GPL-2.0-only
#
# Architectures that offer a FUNCTION_TRACER implementation should
# select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
        bool

config NOP_TRACER
        bool

config HAVE_RETHOOK
        bool

config RETHOOK
        bool
        depends on HAVE_RETHOOK
        help
          Enable the generic return hooking feature. This is an internal
          API, which will be used by other function-entry hooking
          features like fprobe and kprobes.

config HAVE_FUNCTION_TRACER
        bool
        help
          See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
        bool
        help
          See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_FREGS
        bool

config HAVE_FTRACE_GRAPH_FUNC
        bool
        help
          True if ftrace_graph_func() is defined.

config HAVE_DYNAMIC_FTRACE
        bool
        help
          See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
        bool

config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        bool

config HAVE_SINGLE_FTRACE_DIRECT_OPS
        bool

config HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
        bool

config HAVE_EXTRA_IPI_TRACEPOINTS
        bool
        help
          For architectures that use the ipi_raise, ipi_entry and ipi_exit
          tracepoints.

config HAVE_DYNAMIC_FTRACE_WITH_ARGS
        bool
        help
          If this is set, then the arguments and stack can be found from
          the ftrace_regs passed into the function callback regs parameter
          by default, even without setting the REGS flag in the ftrace_ops.
          This allows for use of ftrace_regs_get_argument() and
          ftrace_regs_get_stack_pointer().

config HAVE_FTRACE_REGS_HAVING_PT_REGS
        bool
        help
          If this is set, ftrace_regs contains pt_regs and can therefore be
          converted to pt_regs without allocating memory.

config HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
        bool
        help
          An architecture selects this if it generates
          __patchable_function_entries sections but does not want them
          included in the ftrace locations.

config HAVE_DYNAMIC_FTRACE_WITH_JMP
        bool
        help
          If the architecture supports replacing the call to __fentry__ with
          a "jmp" instruction.

config HAVE_SYSCALL_TRACEPOINTS
        bool
        help
          See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
        bool
        help
          Arch supports the gcc options -pg with -mfentry

config HAVE_NOP_MCOUNT
        bool
        help
          Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount

config HAVE_OBJTOOL_MCOUNT
        bool
        help
          Arch supports objtool --mcount

config HAVE_OBJTOOL_NOP_MCOUNT
        bool
        help
          Arch supports the objtool options --mcount with --mnop.
          An architecture can select this if it wants to enable nop'ing
          of ftrace locations.

config HAVE_C_RECORDMCOUNT
        bool
        help
          C version of recordmcount available?

config HAVE_BUILDTIME_MCOUNT_SORT
        bool
        help
          An architecture selects this if it sorts the mcount_loc section
          at build time.

config BUILDTIME_MCOUNT_SORT
        bool
        default y
        depends on HAVE_BUILDTIME_MCOUNT_SORT && DYNAMIC_FTRACE
        help
          Sort the mcount_loc section at build time.

config TRACER_MAX_TRACE
        bool

config TRACE_CLOCK
        bool

config RING_BUFFER
        bool
        select TRACE_CLOCK
        select IRQ_WORK

config EVENT_TRACING
        select CONTEXT_SWITCH_TRACER
        select GLOB
        bool

config CONTEXT_SWITCH_TRACER
        bool

config RING_BUFFER_ALLOW_SWAP
        bool
        help
          Allow the use of ring_buffer_swap_cpu.
          Adds a very slight overhead to tracing when enabled.

config PREEMPTIRQ_TRACEPOINTS
        bool
        depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
        select TRACING
        default y
        help
          Create preempt/irq toggle tracepoints if needed, so that other parts
          of the kernel can use them to generate or add hooks to them.

# All tracer options should select GENERIC_TRACER. Those options that are
# enabled by all tracers (context switch and event tracer) select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects them. We need the two
# options GENERIC_TRACER and TRACING to avoid circular dependencies and to
# accomplish the hiding of the automatic options.

config TRACING
        bool
        select RING_BUFFER
        select STACKTRACE if STACKTRACE_SUPPORT
        select TRACEPOINTS
        select NOP_TRACER
        select BINARY_PRINTF
        select EVENT_TRACING
        select TRACE_CLOCK
        select NEED_TASKS_RCU

config GENERIC_TRACER
        bool
        select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
        bool
        depends on TRACE_IRQFLAGS_SUPPORT
        depends on STACKTRACE_SUPPORT
        default y

menuconfig FTRACE
        bool "Tracers"
        depends on TRACING_SUPPORT
        default y if DEBUG_KERNEL
        help
          Enable the kernel tracing infrastructure.

if FTRACE

config TRACEFS_AUTOMOUNT_DEPRECATED
        bool "Automount tracefs on debugfs [DEPRECATED]"
        depends on TRACING
        default y
        help
          The tracing interface was moved from /sys/kernel/debug/tracing
          to /sys/kernel/tracing in 2015, but the tracing file system
          was still automounted in /sys/kernel/debug for backward
          compatibility with tooling.

          The new interface has been around for more than 10 years and
          the old debug mount will soon be removed.

config BOOTTIME_TRACING
        bool "Boot-time Tracing support"
        depends on TRACING
        select BOOT_CONFIG
        help
          Enable developers to set up the ftrace subsystem via a supplemental
          kernel cmdline at boot time, for debugging (tracing) driver
          initialization and the boot process.

config FUNCTION_TRACER
        bool "Kernel Function Tracer"
        depends on HAVE_FUNCTION_TRACER
        select KALLSYMS
        select GENERIC_TRACER
        select CONTEXT_SWITCH_TRACER
        select GLOB
        select NEED_TASKS_RCU
        select TASKS_RUDE_RCU
        help
          Enable the kernel to trace every kernel function. This is done
          by using a compiler feature to insert a small, 5-byte No-Operation
          instruction at the beginning of every kernel function; this NOP
          sequence is then dynamically patched into a tracer call when
          tracing is enabled by the administrator. If it is runtime disabled
          (the bootup default), then the overhead of the instructions is very
          small and not measurable even in micro-benchmarks (at least on
          x86, but it may have an impact on other architectures).
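
          For example, assuming tracefs is mounted at /sys/kernel/tracing,
          the function tracer can be exercised at run time with:

            echo function > /sys/kernel/tracing/current_tracer
            cat /sys/kernel/tracing/trace
            echo nop > /sys/kernel/tracing/current_tracer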

config FUNCTION_GRAPH_TRACER
        bool "Kernel Function Graph Tracer"
        depends on HAVE_FUNCTION_GRAPH_TRACER
        depends on FUNCTION_TRACER
        depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
        default y
        help
          Enable the kernel to trace a function at both its entry
          and its return.
          Its first purpose is to trace the duration of functions and
          draw a call graph for each thread with some information like
          the return value. This is done by storing the current return
          address in a stack of calls on the current task structure.

config FUNCTION_GRAPH_RETVAL
        bool "Kernel Function Graph Return Value"
        depends on HAVE_FUNCTION_GRAPH_FREGS
        depends on FUNCTION_GRAPH_TRACER
        default n
        help
          Support recording and printing the function return value when
          using the function graph tracer. It can be helpful to locate
          functions that return errors. This feature is off by default, and
          you can enable it via the trace option funcgraph-retval.
          See Documentation/trace/ftrace.rst

config FUNCTION_GRAPH_RETADDR
        bool "Kernel Function Graph Return Address"
        depends on FUNCTION_GRAPH_TRACER
        default n
        help
          Support recording and printing the function return address when
          using the function graph tracer. It can be helpful to locate the
          code line from which the function was called. This feature is off
          by default, and you can enable it via the trace option
          funcgraph-retaddr.

config FUNCTION_TRACE_ARGS
        bool
        depends on PROBE_EVENTS_BTF_ARGS
        default y
        help
          If the function argument access API and BTF are supported, then
          the function tracer and function graph tracer will support printing
          of function arguments. This feature is off by default, and can be
          enabled via the trace option func-args (for the function tracer) and
          funcgraph-args (for the function graph tracer).

config DYNAMIC_FTRACE
        bool
        depends on FUNCTION_TRACER
        depends on HAVE_DYNAMIC_FTRACE
        default y
        help
          This option will modify all the calls to function tracing
          dynamically (will patch them out of the binary image and
          replace them with a No-Op instruction) on boot up. During
          compile time, a table is made of all the locations that ftrace
          can function trace, and this table is linked into the kernel
          image. When this is enabled, functions can be individually
          enabled, and the functions not enabled will not affect
          performance of the system.

          See the files in /sys/kernel/tracing:

            available_filter_functions
            set_ftrace_filter
            set_ftrace_notrace

          This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
          otherwise has native performance as long as no tracing is active.
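
          For example, assuming tracefs is mounted at /sys/kernel/tracing
          (the 'vfs_*' pattern below is only an illustration), tracing can
          be limited to a subset of functions with:

            echo 'vfs_*' > /sys/kernel/tracing/set_ftrace_filter
            echo function > /sys/kernel/tracing/current_tracer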

config DYNAMIC_FTRACE_WITH_REGS
        def_bool y
        depends on DYNAMIC_FTRACE
        depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        def_bool y
        depends on DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS
        depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config DYNAMIC_FTRACE_WITH_CALL_OPS
        def_bool y
        depends on HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS

config DYNAMIC_FTRACE_WITH_ARGS
        def_bool y
        depends on DYNAMIC_FTRACE
        depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS

config DYNAMIC_FTRACE_WITH_JMP
        def_bool y
        depends on DYNAMIC_FTRACE
        depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        depends on HAVE_DYNAMIC_FTRACE_WITH_JMP

config FUNCTION_SELF_TRACING
        bool "Function trace tracing code"
        depends on FUNCTION_TRACER
        help
          Normally all the tracing code is set to notrace, where the function
          tracer will ignore all the tracing functions. Sometimes it is useful
          for debugging to trace some of the tracing infrastructure itself.
          Enable this to allow some of the tracing infrastructure to be traced
          by the function tracer. Note, this will likely add noise to function
          tracing if events and other tracing features are enabled along with
          function tracing.

          If unsure, say N.

config FPROBE
        bool "Kernel Function Probe (fprobe)"
        depends on HAVE_FUNCTION_GRAPH_FREGS && HAVE_FTRACE_GRAPH_FUNC
        depends on DYNAMIC_FTRACE_WITH_ARGS
        select FUNCTION_GRAPH_TRACER
        default n
        help
          This option enables the kernel function probe (fprobe) based on
          ftrace. An fprobe is similar to a kprobe, but probes only kernel
          function entries and exits. A single fprobe can also probe multiple
          functions.

          If unsure, say N.

config FUNCTION_PROFILER
        bool "Kernel function profiler"
        depends on FUNCTION_TRACER
        default n
        help
          This option enables the kernel function profiler. A file is created
          in debugfs called function_profile_enabled which defaults to zero.
          When a 1 is echoed into this file profiling begins, and when a
          zero is entered, profiling stops. A "functions" file is created in
          the trace_stat directory; this file shows the list of functions that
          have been hit and their counters.

          If in doubt, say N.

config STACK_TRACER
        bool "Trace max stack"
        depends on HAVE_FUNCTION_TRACER
        select FUNCTION_TRACER
        select STACKTRACE
        select KALLSYMS
        help
          This special tracer records the maximum stack footprint of the
          kernel and displays it in /sys/kernel/tracing/stack_trace.

          This tracer works by hooking into every function call that the
          kernel executes, and keeping a maximum stack depth value and
          stack-trace saved. If this is configured with DYNAMIC_FTRACE
          then it will not have any overhead while the stack tracer
          is disabled.

          To enable the stack tracer on bootup, pass in 'stacktrace'
          on the kernel command line.

          The stack tracer can also be enabled or disabled via the
          sysctl kernel.stack_tracer_enabled

          Say N if unsure.

config TRACE_PREEMPT_TOGGLE
        bool
        help
          Enables hooks which will be called when preemption is first disabled,
          and last enabled.

config IRQSOFF_TRACER
        bool "Interrupts-off Latency Tracer"
        default n
        depends on TRACE_IRQFLAGS_SUPPORT
        select TRACE_IRQFLAGS
        select GENERIC_TRACER
        select TRACER_MAX_TRACE
        select RING_BUFFER_ALLOW_SWAP
        select TRACER_SNAPSHOT
        select TRACER_SNAPSHOT_PER_CPU_SWAP
        help
          This option measures the time spent in irqs-off critical
          sections, with microsecond accuracy.

          The default measurement method is a maximum search, which is
          disabled by default and can be runtime (re-)started
          via:

            echo 0 > /sys/kernel/tracing/tracing_max_latency

          (Note that kernel size and overhead increase with this option
          enabled. This option and the preempt-off timing option can be
          used together or separately.)
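
          For example, assuming tracefs is mounted at /sys/kernel/tracing,
          the tracer can be selected and the recorded worst-case latency
          read back with:

            echo irqsoff > /sys/kernel/tracing/current_tracer
            cat /sys/kernel/tracing/tracing_max_latency
            cat /sys/kernel/tracing/trace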

config PREEMPT_TRACER
        bool "Preemption-off Latency Tracer"
        default n
        depends on PREEMPTION
        select GENERIC_TRACER
        select TRACER_MAX_TRACE
        select RING_BUFFER_ALLOW_SWAP
        select TRACER_SNAPSHOT
        select TRACER_SNAPSHOT_PER_CPU_SWAP
        select TRACE_PREEMPT_TOGGLE
        help
          This option measures the time spent in preemption-off critical
          sections, with microsecond accuracy.

          The default measurement method is a maximum search, which is
          disabled by default and can be runtime (re-)started
          via:

            echo 0 > /sys/kernel/tracing/tracing_max_latency

          (Note that kernel size and overhead increase with this option
          enabled. This option and the irqs-off timing option can be
          used together or separately.)

config SCHED_TRACER
        bool "Scheduling Latency Tracer"
        select GENERIC_TRACER
        select CONTEXT_SWITCH_TRACER
        select TRACER_MAX_TRACE
        select TRACER_SNAPSHOT
        help
          This tracer tracks the latency of the highest priority task
          to be scheduled in, starting from the point it has woken up.

config HWLAT_TRACER
        bool "Tracer to detect hardware latencies (like SMIs)"
        select GENERIC_TRACER
        select TRACER_MAX_TRACE
        help
          This tracer, when enabled, will create one or more kernel threads,
          depending on what the cpumask file is set to, with each thread
          spinning in a loop looking for interruptions caused by
          something other than the kernel. For example, if a
          System Management Interrupt (SMI) takes a noticeable amount of
          time, this tracer will detect it. This is useful for testing
          if a system is reliable for Real Time tasks.

          Some files are created in the tracing directory when this
          is enabled:

            hwlat_detector/width  - time in usecs for how long to spin for
            hwlat_detector/window - time in usecs between the start of each
                                    iteration

          A kernel thread is created that will spin with interrupts disabled
          for "width" microseconds in every "window" cycle. It will not spin
          for the remaining "window - width" microseconds, during which the
          system can continue to operate.

          The output will appear in the trace and trace_pipe files.

          When the tracer is not running, it has no effect on the system,
          but when it is running, it can cause the system to be
          periodically non-responsive. Do not run this tracer on a
          production system.

          To enable this tracer, echo "hwlat" into the current_tracer
          file. Every time a latency is greater than tracing_thresh, it will
          be recorded into the ring buffer.

config OSNOISE_TRACER
        bool "OS Noise tracer"
        select GENERIC_TRACER
        select TRACER_MAX_TRACE
        help
          In the context of high-performance computing (HPC), Operating
          System Noise (osnoise) refers to the interference experienced by an
          application due to activities inside the operating system. In the
          context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
          can cause noise to the system. Moreover, hardware-related jobs can
          also cause noise, for example, via SMIs.

          The osnoise tracer leverages the hwlat_detector by running a similar
          loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
          the sources of osnoise during its execution. The osnoise tracer takes
          note of the entry and exit point of any source of interference,
          increasing a per-cpu interference counter. It saves an interference
          counter for each source of interference. The interference counter for
          NMI, IRQs, SoftIRQs, and threads is increased any time the tool
          observes these interferences' entry events. When a noise happens
          without any interference from the operating system level, the
          hardware noise counter increases, pointing to a hardware-related
          noise. In this way, osnoise can account for any source of
          interference. At the end of the period, the osnoise tracer prints
          the sum of all noise, the max single noise, the percentage of CPU
          available for the thread, and the counters for the noise sources.

          In addition to the tracer, a set of tracepoints were added to
          facilitate the identification of the osnoise source.

          The output will appear in the trace and trace_pipe files.

          To enable this tracer, echo "osnoise" into the current_tracer
          file.
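
          For example, assuming tracefs is mounted at /sys/kernel/tracing:

            echo osnoise > /sys/kernel/tracing/current_tracer
            cat /sys/kernel/tracing/trace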

config TIMERLAT_TRACER
        bool "Timerlat tracer"
        select OSNOISE_TRACER
        select GENERIC_TRACER
        help
          The timerlat tracer aims to help preemptive kernel developers
          find sources of wakeup latencies of real-time threads.

          The tracer creates a per-cpu kernel thread with real-time priority.
          The tracer thread sets a periodic timer to wake itself up, and goes
          to sleep waiting for the timer to fire. At the wakeup, the thread
          then computes a wakeup latency value as the difference between
          the current time and the absolute time that the timer was set
          to expire.

          The tracer prints two lines at every activation. The first is the
          timer latency observed at the hardirq context before the
          activation of the thread. The second is the timer latency observed
          by the thread, which is the same level that cyclictest reports. The
          ACTIVATION ID field serves to relate the irq execution to its
          respective thread execution.

          The tracer is built on top of the osnoise tracer, and the osnoise:
          events can be used to trace the source of interference from NMI,
          IRQs and other threads. It also enables the capture of the
          stacktrace at the IRQ context, which helps to identify the code
          path that can cause thread delay.

config MMIOTRACE
        bool "Memory mapped IO tracing"
        depends on HAVE_MMIOTRACE_SUPPORT && PCI
        select GENERIC_TRACER
        help
          Mmiotrace traces Memory Mapped I/O access and is meant for
          debugging and reverse engineering. It is called from the ioremap
          implementation and works via page faults. Tracing is disabled by
          default and can be enabled at run-time.

          See Documentation/trace/mmiotrace.rst.
          If you are not helping to develop drivers, say N.

config ENABLE_DEFAULT_TRACERS
        bool "Trace process context switches and events"
        depends on !GENERIC_TRACER
        select TRACING
        help
          This tracer hooks into various trace points in the kernel,
          allowing the user to pick and choose which trace point they
          want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
        bool "Trace syscalls"
        depends on HAVE_SYSCALL_TRACEPOINTS
        select GENERIC_TRACER
        select KALLSYMS
        help
          Basic tracer to catch the syscall entry and exit events.
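
          For example, assuming tracefs is mounted at /sys/kernel/tracing,
          all syscall events can be enabled with:

            echo 1 > /sys/kernel/tracing/events/syscalls/enable
            cat /sys/kernel/tracing/trace_pipe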

config TRACE_SYSCALL_BUF_SIZE_DEFAULT
        int "System call user read max size"
        range 0 165
        default 63
        depends on FTRACE_SYSCALLS
        help
          Some system call trace events will record the data from a user
          space address that one of the parameters points to. The amount of
          data per event is limited. That limit is set by this config, which
          also affects how much user space data perf can read.

          For a tracing instance, this size may be changed by writing into
          its syscall_user_buf_size file.

config TRACER_SNAPSHOT
        bool "Create a snapshot trace buffer"
        select TRACER_MAX_TRACE
        help
          Allow tracing users to take a snapshot of the current buffer using
          the ftrace interface, e.g.:

            echo 1 > /sys/kernel/tracing/snapshot
            cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
        bool "Allow snapshot to swap per CPU"
        depends on TRACER_SNAPSHOT
        select RING_BUFFER_ALLOW_SWAP
        help
          Allow doing a snapshot of a single CPU buffer instead of a
          full swap (all buffers). If this is set, then the following is
          allowed:

            echo 1 > /sys/kernel/tracing/per_cpu/cpu2/snapshot

          After which, only the tracing buffer for CPU 2 is swapped with
          the main tracing buffer, and the other CPU buffers remain the same.

          When this is enabled, it adds a little more overhead to the
          trace recording, as it needs to add some checks to synchronize
          recording with swaps. But this does not affect the performance
          of the overall system. This is enabled by default when the preempt
          or irq latency tracers are enabled, as those need to swap as well
          and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
        bool
        select GENERIC_TRACER

choice
        prompt "Branch Profiling"
        default BRANCH_PROFILE_NONE
        help
          Branch profiling is a software profiler. It will add hooks
          into the C conditionals to test which path a branch takes.

          The likely/unlikely profiler only looks at the conditions that
          are annotated with a likely or unlikely macro.

          The "all branch" profiler will profile every if-statement in the
          kernel. This profiler will also enable the likely/unlikely
          profiler.

          Either of the above profilers adds a bit of overhead to the system.
          If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
        bool "No branch profiling"
        help
          No branch profiling. Branch profiling adds a bit of overhead.
          Only enable it if you want to analyse the branching behavior.
          Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
        bool "Trace likely/unlikely profiler"
        select TRACE_BRANCH_PROFILING
        help
          This tracer profiles all likely and unlikely macros
          in the kernel. It will display the results in:

            /sys/kernel/tracing/trace_stat/branch_annotated

          Note: this will add a significant overhead; only turn this
          on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
        bool "Profile all if conditionals" if !FORTIFY_SOURCE
        select TRACE_BRANCH_PROFILING
        help
          This tracer profiles all branch conditions. Every if ()
          executed in the kernel is recorded, whether it hit or missed.
          The results will be displayed in:

            /sys/kernel/tracing/trace_stat/branch_all

          This option also enables the likely/unlikely profiler.

          This configuration, when enabled, will impose a great overhead
          on the system. This should only be enabled when the system
          is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
        bool
        help
          Selected by tracers that will trace the likely and unlikely
          conditions. This prevents the tracers themselves from being
          profiled. Profiling the tracing infrastructure can only happen
          when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
        bool "Trace likely/unlikely instances"
        depends on TRACE_BRANCH_PROFILING
        select TRACING_BRANCHES
        help
          This traces the events of likely and unlikely condition
          calls in the kernel. The difference between this and the
          "Trace likely/unlikely profiler" is that this is not a
          histogram of the callers, but actually places the calling
          events into a running trace buffer to see when and where the
          events happened, as well as their results.

          Say N if unsure.

config BLK_DEV_IO_TRACE
        bool "Support for tracing block IO actions"
        depends on SYSFS
        depends on BLOCK
        select RELAY
        select DEBUG_FS
        select TRACEPOINTS
        select GENERIC_TRACER
        select STACKTRACE
        help
          Say Y here if you want to be able to trace the block layer actions
          on a given queue. Tracing allows you to see any traffic happening
          on a block device queue. For more information (and the userspace
          support tools needed), fetch the blktrace tools from:

            git://git.kernel.dk/blktrace.git

          Tracing is also possible using the ftrace interface, e.g.:

            echo 1 > /sys/block/sda/sda1/trace/enable
            echo blk > /sys/kernel/tracing/current_tracer
            cat /sys/kernel/tracing/trace_pipe

          If unsure, say N.

config FPROBE_EVENTS
        depends on FPROBE
        depends on HAVE_REGS_AND_STACK_ACCESS_API
        bool "Enable fprobe-based dynamic events"
        select TRACING
        select PROBE_EVENTS
        select DYNAMIC_EVENTS
        default y
        help
          This allows the user to add tracing events on function entry and
          exit via the ftrace interface. The syntax is the same as for kprobe
          events, and kprobe events on function entry and exit are
          transparently converted to fprobe events.

config PROBE_EVENTS_BTF_ARGS
        depends on HAVE_FUNCTION_ARG_ACCESS_API
        depends on FPROBE_EVENTS || KPROBE_EVENTS
        depends on DEBUG_INFO_BTF && BPF_SYSCALL
        bool "Support BTF function arguments for probe events"
        default y
        help
          The user can specify the arguments of the probe event using the names
          of the arguments of the probed function, when the probe location is a
          kernel function entry or a tracepoint.
          This is available only if BTF (BPF Type Format) support is enabled.

config KPROBE_EVENTS
        depends on KPROBES
        depends on HAVE_REGS_AND_STACK_ACCESS_API
        bool "Enable kprobes-based dynamic events"
        select TRACING
        select PROBE_EVENTS
        select DYNAMIC_EVENTS
        default y
        help
          This allows the user to add tracing events (similar to tracepoints)
          on the fly via the ftrace interface. See
          Documentation/trace/kprobetrace.rst for more details.

          Those events can be inserted wherever kprobes can probe, and record
          various register and memory values.

          This option is also required by the perf-probe subcommand of perf
          tools. If you want to use perf tools, this option is strongly
          recommended.
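
          For example, assuming tracefs is mounted at /sys/kernel/tracing and
          that do_sys_openat2 is a probeable symbol in your kernel (the
          symbol is only an illustration), a probe can be added and enabled
          with:

            echo 'p:myopen do_sys_openat2' >> /sys/kernel/tracing/kprobe_events
            echo 1 > /sys/kernel/tracing/events/kprobes/myopen/enable
            cat /sys/kernel/tracing/trace_pipe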

config KPROBE_EVENTS_ON_NOTRACE
        bool "Do NOT protect notrace function from kprobe events"
        depends on KPROBE_EVENTS
        depends on DYNAMIC_FTRACE
        default n
        help
          This is only for the developers who want to debug ftrace itself
          using kprobe events.

          If kprobes can use ftrace instead of a breakpoint, ftrace-related
          functions are protected from kprobe events to prevent an infinite
          recursion or any unexpected execution path which leads to a kernel
          crash.

          This option disables such protection and allows you to put kprobe
          events on ftrace functions for debugging ftrace by itself.
          Note that this might let you shoot yourself in the foot.

          If unsure, say N.

config UPROBE_EVENTS
        bool "Enable uprobes-based dynamic events"
        depends on ARCH_SUPPORTS_UPROBES
        depends on MMU
        depends on PERF_EVENTS
        select UPROBES
        select PROBE_EVENTS
        select DYNAMIC_EVENTS
        select TRACING
        default y
        help
          This allows the user to add tracing events on top of userspace
          dynamic events (similar to tracepoints) on the fly via the trace
          events interface. Those events can be inserted wherever uprobes
          can probe, and record various registers.
          This option is required if you plan to use the perf-probe
          subcommand of perf tools on user space applications.

config EPROBE_EVENTS
        bool "Enable event-based dynamic events"
        depends on TRACING
        depends on HAVE_REGS_AND_STACK_ACCESS_API
        select PROBE_EVENTS
        select DYNAMIC_EVENTS
        default y
        help
          Eprobes are dynamic events that can be placed on other existing
          events. They can be used to limit what fields are recorded in
          an event or even dereference a field of an event. They can also
          convert the type of an event field, for example, turning an
          address into a string.

config BPF_EVENTS
        depends on BPF_SYSCALL
        depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
        bool
        default y
        help
          This allows the user to attach BPF programs to kprobe, uprobe, and
          tracepoint events.

config DYNAMIC_EVENTS
        def_bool n

config PROBE_EVENTS
        def_bool n

config BPF_KPROBE_OVERRIDE
        bool "Enable BPF programs to override a kprobed function"
        depends on BPF_EVENTS
        depends on FUNCTION_ERROR_INJECTION
        default n
        help
          Allows BPF to override the execution of a probed function and
          set a different return value. This is used for error injection.

config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
        bool
        depends on DYNAMIC_FTRACE

config FTRACE_MCOUNT_USE_CC
        def_bool y
        depends on $(cc-option,-mrecord-mcount)
        depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
        depends on DYNAMIC_FTRACE

config FTRACE_MCOUNT_USE_OBJTOOL
        def_bool y
        depends on HAVE_OBJTOOL_MCOUNT
        depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
        depends on !FTRACE_MCOUNT_USE_CC
        depends on DYNAMIC_FTRACE
        select OBJTOOL

config FTRACE_MCOUNT_USE_RECORDMCOUNT
        def_bool y
        depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
        depends on !FTRACE_MCOUNT_USE_CC
        depends on !FTRACE_MCOUNT_USE_OBJTOOL
        depends on DYNAMIC_FTRACE

config TRACING_MAP
        bool
        depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
        help
          tracing_map is a special-purpose lock-free map for tracing,
          separated out as a stand-alone facility in order to allow it
          to be shared between multiple tracers. It isn't meant to be
          generally used outside of that context, and is normally
          selected by tracers that use it.

config SYNTH_EVENTS
        bool "Synthetic trace events"
        select TRACING
        select DYNAMIC_EVENTS
        default n
        help
          Synthetic events are user-defined trace events that can be
          used to combine data from other trace events or in fact any
          data source. Synthetic events can be generated indirectly
          via the trace() action of histogram triggers or directly
          by way of an in-kernel API.

          See Documentation/trace/events.rst or
          Documentation/trace/histogram.rst for details and examples.

          If in doubt, say N.
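
          For example, assuming tracefs is mounted at /sys/kernel/tracing, a
          synthetic event with a latency and a pid field can be defined with:

            echo 'wakeup_latency u64 lat; pid_t pid' >> \
                    /sys/kernel/tracing/synthetic_events

          The new event then appears under events/synthetic/.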

config USER_EVENTS
        bool "User trace events"
        select TRACING
        select DYNAMIC_EVENTS
        help
          User trace events are user-defined trace events that
          can be used like an existing kernel trace event. User trace
          events are generated by writing to a tracefs file. User
          processes can determine if their tracing events should be
          generated by registering a value and bit with the kernel
          that reflects when it is enabled or not.

          See Documentation/trace/user_events.rst.
          If in doubt, say N.

config HIST_TRIGGERS
        bool "Histogram triggers"
        depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
        select TRACING_MAP
        select TRACING
        select DYNAMIC_EVENTS
        select SYNTH_EVENTS
        default n
        help
          Hist triggers allow one or more arbitrary trace event fields
          to be aggregated into hash tables and dumped to stdout by
          reading a debugfs/tracefs file. They're useful for
          gathering quick and dirty (though precise) summaries of
          event activity as an initial guide for further investigation
          using more advanced tools.

          Inter-event tracing of quantities such as latencies is also
          supported using hist triggers under this option.

          See Documentation/trace/histogram.rst.
          If in doubt, say N.
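
          For example, assuming tracefs is mounted at /sys/kernel/tracing and
          the kmem:kmalloc event is available, a histogram of kmalloc request
          sizes keyed by call site can be created with:

            echo 'hist:key=call_site:val=bytes_req' > \
                    /sys/kernel/tracing/events/kmem/kmalloc/trigger
            cat /sys/kernel/tracing/events/kmem/kmalloc/hist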

config TRACE_EVENT_INJECT
        bool "Trace event injection"
        depends on TRACING
        help
          Allow user-space to inject a specific trace event into the ring
          buffer. This is mainly used for testing purposes.

          If unsure, say N.

config TRACEPOINT_BENCHMARK
        bool "Add tracepoint that benchmarks tracepoints"
        help
          This option creates the tracepoint "benchmark:benchmark_event".
          When the tracepoint is enabled, it kicks off a kernel thread that
          goes into an infinite loop (calling cond_resched() to let other tasks
          run), and calls the tracepoint. Each iteration will record the time
          it took to write to the tracepoint, and on the next iteration that
          data will be passed to the tracepoint itself. That is, the tracepoint
          will report the time it took to do the previous tracepoint.
          The string written to the tracepoint is a static string of 128 bytes
          to keep the time the same. The initial string is simply a write of
          "START". The second string records the cold cache time of the first
          write, which is not added to the rest of the calculations.

          As it is a tight loop, it benchmarks as hot cache. That's fine because
          we care most about hot paths that are probably in cache already.

          An example of the output:

            START
            first=3672 [COLD CACHED]
            last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
            last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
            last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
            last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
            last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
            last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

config RING_BUFFER_BENCHMARK
        tristate "Ring buffer benchmark stress tester"
        depends on RING_BUFFER
        help
          This option creates a test to stress the ring buffer and benchmark it.
          It creates its own ring buffer such that it will not interfere with
          any other users of the ring buffer (such as ftrace). It then creates
          a producer and consumer that will run for 10 seconds and sleep for
          10 seconds. Each interval it will print out the number of events
          it recorded and give a rough estimate of how long each iteration took.

          It does not disable interrupts or raise its priority, so it may be
          affected by processes that are running.

          If unsure, say N.

config TRACE_EVAL_MAP_FILE
        bool "Show eval mappings for trace events"
        depends on TRACING
        help
          The "print fmt" of the trace events will show the enum/sizeof names
          instead of their values. This can cause problems for user space tools
          that use this string to parse the raw data, as user space does not
          know how to convert the string to its value.

          To fix this, there's a special macro in the kernel that can be used
          to convert an enum/sizeof into its value. If this macro is used, then
          the print fmt strings will be converted to their values.

          If something does not get converted properly, this option can be
          used to show what enums/sizeof the kernel tried to convert.

          This option is for debugging the conversions. A file is created
          in the tracing directory called "eval_map" that will show the
          names matched with their values and what trace event system they
          belong to.

          Normally, the mapping of the strings to values will be freed after
          boot up or module load. With this option, they will not be freed, as
          they are needed for the "eval_map" file. Enabling this option will
          increase the memory footprint of the running kernel.

          If unsure, say N.

config FTRACE_RECORD_RECURSION
        bool "Record functions that recurse in function tracing"
        depends on FUNCTION_TRACER
        help
          All callbacks that attach to function tracing have some sort
          of protection against recursion. Even though the protection exists,
          it adds overhead. This option will create a file in the tracefs
          file system called "recursed_functions" that will list the functions
          that triggered a recursion.

          This will add more overhead to cases that have recursion.

          If unsure, say N

config FTRACE_RECORD_RECURSION_SIZE
        int "Max number of recursed functions to record"
        default 128
        depends on FTRACE_RECORD_RECURSION
        help
          This defines the limit of the number of functions that can be
          listed in the "recursed_functions" file, which lists all
          the functions that caused a recursion to happen.
          This file can be reset, but the limit cannot be changed
          at runtime.

config FTRACE_VALIDATE_RCU_IS_WATCHING
        bool "Validate RCU is on during ftrace execution"
        depends on FUNCTION_TRACER
        depends on ARCH_WANTS_NO_INSTR
        help
          All callbacks that attach to function tracing have some sort of
          protection against recursion. This option is only to verify that
          ftrace (and other users of ftrace_test_recursion_trylock()) are not
          called outside of RCU, as if they are, it can cause a race. But it
          also has a noticeable overhead when enabled.

          If unsure, say N

config RING_BUFFER_RECORD_RECURSION
        bool "Record functions that recurse in the ring buffer"
        depends on FTRACE_RECORD_RECURSION
        # default y, because it is coupled with FTRACE_RECORD_RECURSION
        default y
        help
          The ring buffer has its own internal recursion. Although the
          protection keeps recursion from causing harm, it does cause
          unwanted overhead. Enabling this option will record the places
          where recursion was detected in the ftrace "recursed_functions"
          file.

          This will add more overhead to cases that have recursion.

config GCOV_PROFILE_FTRACE
        bool "Enable GCOV profiling on ftrace subsystem"
        depends on GCOV_KERNEL
        help
          Enable GCOV profiling on the ftrace subsystem for checking
          which functions/lines are tested.

          If unsure, say N.

          Note that on a kernel compiled with this config, ftrace will
          run significantly slower.

config FTRACE_SELFTEST
        bool

config FTRACE_STARTUP_TEST
        bool "Perform a startup test on ftrace"
        depends on GENERIC_TRACER
        select FTRACE_SELFTEST
        help
          This option performs a series of startup tests on ftrace. On bootup
          a series of tests are made to verify that the tracer is
          functioning properly. It will do tests on all the configured
          tracers of ftrace.

config EVENT_TRACE_STARTUP_TEST
        bool "Run selftest on trace events"
        depends on FTRACE_STARTUP_TEST
        default y
        help
          This option performs a test on all trace events in the system.
          It basically just enables each event and runs some code that
          will trigger events (not necessarily the event it enables).
          This may take some time to run as there are a lot of events.

config EVENT_TRACE_TEST_SYSCALLS
        bool "Run selftest on syscall events"
        depends on EVENT_TRACE_STARTUP_TEST
        help
          This option will also enable testing every syscall event.
          It only enables each event and disables it again, running various
          loads with the event enabled. This adds a bit more time for kernel
          boot up since it runs this on every system call defined.

          TBD - enable a way to actually call the syscalls as we test their
          events

config FTRACE_SORT_STARTUP_TEST
        bool "Verify compile time sorting of ftrace functions"
        depends on DYNAMIC_FTRACE
        depends on BUILDTIME_MCOUNT_SORT
        help
          Sorting of the mcount_loc section, which is used to find where
          ftrace needs to patch functions for tracing and other callbacks,
          is done at compile time. But if the sort is not done correctly,
          it will cause non-deterministic failures. When this is set, the
          sorted sections will be verified to be indeed sorted, and a
          warning will be issued if they are not.

          If unsure, say N

config RING_BUFFER_STARTUP_TEST
        bool "Ring buffer startup self test"
        depends on RING_BUFFER
        help
          Run a simple self test on the ring buffer on boot up. Late in the
          kernel boot sequence, a test will start that kicks off
          a thread per cpu. Each thread will write events of various sizes
          into the ring buffer. Another thread is created to send IPIs
          to each of the threads, where the IPI handler will also write
          to the ring buffer, to test/stress the nesting ability.
          If any anomalies are discovered, a warning will be displayed
          and all ring buffers will be disabled.

          The test runs for 10 seconds. This will slow your boot time
          by at least 10 more seconds.

          At the end of the test, statistics and more checks are done.
          It will output the stats of each per cpu buffer: what
          was written, the sizes, what was read, what was lost, and
          other similar details.

          If unsure, say N

config RING_BUFFER_VALIDATE_TIME_DELTAS
        bool "Verify ring buffer time stamp deltas"
        depends on RING_BUFFER
        help
          This will audit the time stamps on the ring buffer sub
          buffers to make sure that all the time deltas for the
          events on a sub buffer match the current time stamp.
          This audit is performed for every event that is not
          interrupted, or interrupting another event. A check
          is also made when traversing sub buffers to make sure
          that all the deltas on the previous sub buffer do not
          add up to be greater than the current time stamp.

          NOTE: This adds significant overhead to recording of events,
          and should only be used to test the logic of the ring buffer.
          Do not use it on production systems.

          Only say Y if you understand what this does, and you
          still want it enabled. Otherwise say N

config MMIOTRACE_TEST
        tristate "Test module for mmiotrace"
        depends on MMIOTRACE && m
        help
          This is a dumb module for testing mmiotrace. It is very dangerous
          as it will write garbage to IO memory starting at a given address.
          However, it should be safe to use on e.g. an unused portion of VRAM.

          Say N, unless you absolutely know what you are doing.

config PREEMPTIRQ_DELAY_TEST
        tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
        depends on m
        help
          Select this option to build a test module that can help test latency
          tracers by executing a preempt or irq disable section with a user
          configurable delay. The module busy waits for the duration of the
          critical section.

          For example, the following invocation generates a burst of three
          irq-disabled critical sections for 500us:

            modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3

          What's more, if you want to attach the test to the CPU on which the
          latency tracer is running, specify cpu_affinity=cpu_num at the end
          of the command.

          If unsure, say N

config SYNTH_EVENT_GEN_TEST
        tristate "Test module for in-kernel synthetic event generation"
        depends on SYNTH_EVENTS && m
        help
          This option creates a test module to check the base
          functionality of in-kernel synthetic event definition and
          generation.

          To test, insert the module, and then check the trace buffer
          for the generated sample events.

          If unsure, say N.

config KPROBE_EVENT_GEN_TEST
        tristate "Test module for in-kernel kprobe event generation"
        depends on KPROBE_EVENTS && m
        help
          This option creates a test module to check the base
          functionality of in-kernel kprobe event definition.

          To test, insert the module, and then check the trace buffer
          for the generated kprobe events.

          If unsure, say N.
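
          For example, assuming the module is built with its default name and
          tracefs is mounted at /sys/kernel/tracing:

            modprobe kprobe_event_gen_test
            cat /sys/kernel/tracing/trace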

config HIST_TRIGGERS_DEBUG
        bool "Hist trigger debug support"
        depends on HIST_TRIGGERS
        help
          Add "hist_debug" file for each event, which when read will
          dump out a bunch of internal details about the hist triggers
          defined on that event.

          The hist_debug file serves a couple of purposes:

            - Helps developers verify that nothing is broken.

            - Provides educational information to support the details
              of the hist trigger internals as described by
              Documentation/trace/histogram-design.rst.

          The hist_debug output only covers the data structures
          related to the histogram definitions themselves and doesn't
          display the internals of map buckets or variable values of
          running histograms.

          If unsure, say N.

source "kernel/trace/rv/Kconfig"

endif # FTRACE