# SPDX-License-Identifier: GPL-2.0-only
#
# Architectures that offer a FUNCTION_TRACER implementation should
# select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_RETHOOK
	bool

config RETHOOK
	bool
	depends on HAVE_RETHOOK
	help
	  Enable generic return hooking feature. This is an internal
	  API, which will be used by other function-entry hooking
	  features like fprobe and kprobes.

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_FREGS
	bool

config HAVE_FTRACE_GRAPH_FUNC
	bool
	help
	  True if ftrace_graph_func() is defined.

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
	bool

config HAVE_EXTRA_IPI_TRACEPOINTS
	bool
	help
	  For architectures that use the ipi_raise, ipi_entry and ipi_exit
	  tracepoints.

config HAVE_DYNAMIC_FTRACE_WITH_ARGS
	bool
	help
	  If this is set, then arguments and stack can be found from
	  the ftrace_regs passed into the function callback regs parameter
	  by default, even without setting the REGS flag in the ftrace_ops.
	  This allows for use of ftrace_regs_get_argument() and
	  ftrace_regs_get_stack_pointer().

config HAVE_FTRACE_REGS_HAVING_PT_REGS
	bool
	help
	  If this is set, ftrace_regs has pt_regs, thus it can convert to
	  pt_regs without allocating memory.

config HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
	bool
	help
	  If the architecture generates __patchable_function_entries sections
	  but does not want them included in the ftrace locations.

config HAVE_DYNAMIC_FTRACE_WITH_JMP
	bool
	help
	  If the architecture supports replacing __fentry__ with a
	  "jmp" instruction.

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_NOP_MCOUNT
	bool
	help
	  Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount

config HAVE_OBJTOOL_MCOUNT
	bool
	help
	  Arch supports objtool --mcount

config HAVE_OBJTOOL_NOP_MCOUNT
	bool
	help
	  Arch supports the objtool options --mcount with --mnop.
	  An architecture can select this if it wants to enable nop'ing
	  of ftrace locations.

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config HAVE_BUILDTIME_MCOUNT_SORT
	bool
	help
	  An architecture selects this if it sorts the mcount_loc section
	  at build time.
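#
# Illustrative sketch (not part of this file): an architecture advertises its
# ftrace capabilities by selecting the HAVE_* symbols above from its own
# arch/*/Kconfig entry. The architecture name below is only an example:
#
#	config FOO_ARCH
#		def_bool y
#		select HAVE_FUNCTION_TRACER
#		select HAVE_FUNCTION_GRAPH_TRACER
#		select HAVE_DYNAMIC_FTRACE
#
# The user-visible tracer options further down then become selectable only
# when the corresponding HAVE_* prerequisites are met.
#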
config BUILDTIME_MCOUNT_SORT
	bool
	default y
	depends on HAVE_BUILDTIME_MCOUNT_SORT && DYNAMIC_FTRACE
	help
	  Sort the mcount_loc section at build time.

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

config PREEMPTIRQ_TRACEPOINTS
	bool
	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
	select TRACING
	default y
	help
	  Create preempt/irq toggle tracepoints if needed, so that other parts
	  of the kernel can use them to generate or add hooks to them.

# All tracer options should select GENERIC_TRACER. Options that are enabled
# by all tracers (the context switch and event tracers) select TRACING instead.
# This allows those options to appear when no other tracer is selected, but
# they do not appear when something else selects them. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies while hiding
# the automatic options.

config TRACING
	bool
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK
	select NEED_TASKS_RCU

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

menuconfig FTRACE
	bool "Tracers"
	depends on TRACING_SUPPORT
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config TRACEFS_AUTOMOUNT_DEPRECATED
	bool "Automount tracefs on debugfs [DEPRECATED]"
	depends on TRACING
	default y
	help
	  The tracing interface was moved from /sys/kernel/debug/tracing
	  to /sys/kernel/tracing in 2015, but the tracing file system
	  was still automounted in /sys/kernel/debug for backward
	  compatibility with tooling.

	  The new interface has been around for more than 10 years and
	  the old debug mount will soon be removed.

config BOOTTIME_TRACING
	bool "Boot-time Tracing support"
	depends on TRACING
	select BOOT_CONFIG
	help
	  Allow developers to set up the ftrace subsystem via a supplemental
	  kernel cmdline at boot time, for debugging (tracing) driver
	  initialization and the boot process.

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select NEED_TASKS_RCU
	select TASKS_RUDE_RCU
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function. This NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it is disabled at
	  runtime (the bootup default), the overhead of the instructions is
	  very small and not measurable even in micro-benchmarks (at least
	  on x86, though it may have an impact on other architectures).

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry
	  and its return.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread, with some information like
	  the return value. This is done by setting the current return
	  address on the current task structure into a stack of calls.
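#
# Illustrative runtime usage (assumes FUNCTION_GRAPH_TRACER is enabled and
# tracefs is mounted at /sys/kernel/tracing):
#
#	echo function_graph > /sys/kernel/tracing/current_tracer
#	cat /sys/kernel/tracing/trace
#
# See Documentation/trace/ftrace.rst for the full runtime interface.
#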
config FUNCTION_GRAPH_RETVAL
	bool "Kernel Function Graph Return Value"
	depends on HAVE_FUNCTION_GRAPH_FREGS
	depends on FUNCTION_GRAPH_TRACER
	default n
	help
	  Support recording and printing the function return value when
	  using the function graph tracer. It can be helpful to locate
	  functions that return errors. This feature is off by default, and
	  you can enable it via the trace option funcgraph-retval.
	  See Documentation/trace/ftrace.rst

config FUNCTION_GRAPH_RETADDR
	bool "Kernel Function Graph Return Address"
	depends on FUNCTION_GRAPH_TRACER
	default n
	help
	  Support recording and printing the function return address when
	  using the function graph tracer. It can be helpful to locate the
	  code line from which the function is called. This feature is off
	  by default, and you can enable it via the trace option
	  funcgraph-retaddr.

config FUNCTION_TRACE_ARGS
	bool
	depends on PROBE_EVENTS_BTF_ARGS
	default y
	help
	  If supported with the function argument access API and BTF, then
	  the function tracer and function graph tracer will support printing
	  of function arguments. This feature is off by default, and can be
	  enabled via the trace option func-args (for the function tracer) and
	  funcgraph-args (for the function graph tracer).

config DYNAMIC_FTRACE
	bool
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect
	  performance of the system.

	  See the files in /sys/kernel/tracing:

	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.
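#
# Illustrative filter usage (assumes DYNAMIC_FTRACE and a mounted tracefs;
# the chosen symbol is only an example):
#
#	grep '^vfs_' /sys/kernel/tracing/available_filter_functions
#	echo vfs_read > /sys/kernel/tracing/set_ftrace_filter
#	echo function > /sys/kernel/tracing/current_tracer
#
# Only the filtered functions are then patched to call the tracer; the other
# function entry sites remain NOPs.
#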
config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	def_bool y
	depends on DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS
	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config DYNAMIC_FTRACE_WITH_CALL_OPS
	def_bool y
	depends on HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS

config DYNAMIC_FTRACE_WITH_ARGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS

config DYNAMIC_FTRACE_WITH_JMP
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	depends on HAVE_DYNAMIC_FTRACE_WITH_JMP

config FUNCTION_SELF_TRACING
	bool "Function trace tracing code"
	depends on FUNCTION_TRACER
	help
	  Normally all the tracing code is set to notrace, where the function
	  tracer will ignore all the tracing functions. Sometimes it is useful
	  for debugging to trace some of the tracing infrastructure itself.
	  Enable this to allow some of the tracing infrastructure to be traced
	  by the function tracer. Note, this will likely add noise to function
	  tracing if events and other tracing features are enabled along with
	  function tracing.

	  If unsure, say N.

config FPROBE
	bool "Kernel Function Probe (fprobe)"
	depends on HAVE_FUNCTION_GRAPH_FREGS && HAVE_FTRACE_GRAPH_FUNC
	depends on DYNAMIC_FTRACE_WITH_ARGS
	select FUNCTION_GRAPH_TRACER
	default n
	help
	  This option enables the kernel function probe (fprobe) based on
	  ftrace. The fprobe is similar to kprobes, but probes only kernel
	  function entries and exits. A single fprobe can also probe
	  multiple functions.

	  If unsure, say N.

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled

	  Say N if unsure.

config TRACE_PREEMPT_TOGGLE
	bool
	help
	  Enables hooks which will be called when preemption is first disabled,
	  and last enabled.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on PREEMPTION
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	select TRACE_PREEMPT_TOGGLE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)
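#
# Illustrative latency-tracer usage (assumes IRQSOFF_TRACER is enabled and
# tracefs is mounted at /sys/kernel/tracing):
#
#	echo 0 > /sys/kernel/tracing/tracing_max_latency
#	echo irqsoff > /sys/kernel/tracing/current_tracer
#	cat /sys/kernel/tracing/tracing_max_latency
#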
config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  This tracer, when enabled, will create one or more kernel threads,
	  depending on what the cpumask file is set to, with each thread
	  spinning in a loop looking for interruptions caused by
	  something other than the kernel. For example, if a
	  System Management Interrupt (SMI) takes a noticeable amount of
	  time, this tracer will detect it. This is useful for testing
	  whether a system is reliable for Real Time tasks.

	  Some files are created in the tracing directory when this
	  is enabled:

	    hwlat_detector/width - time in usecs for how long to spin for
	    hwlat_detector/window - time in usecs between the start of each
	                            iteration

	  A kernel thread is created that will spin with interrupts disabled
	  for "width" microseconds in every "window" cycle. It will not spin
	  for "window - width" microseconds, where the system can
	  continue to operate.

	  The output will appear in the trace and trace_pipe files.

	  When the tracer is not running, it has no effect on the system,
	  but when it is running, it can cause the system to be
	  periodically non-responsive. Do not run this tracer on a
	  production system.

	  To enable this tracer, echo "hwlat" into the current_tracer
	  file. Every time a latency is greater than tracing_thresh, it will
	  be recorded into the ring buffer.

config OSNOISE_TRACER
	bool "OS Noise tracer"
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  In the context of high-performance computing (HPC), Operating
	  System Noise (osnoise) refers to the interference experienced by an
	  application due to activities inside the operating system. In the
	  context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
	  can cause noise to the system. Moreover, hardware-related jobs can
	  also cause noise, for example, via SMIs.

	  The osnoise tracer leverages the hwlat_detector by running a similar
	  loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
	  the sources of osnoise during its execution. The osnoise tracer takes
	  note of the entry and exit point of any source of interference,
	  increasing a per-cpu interference counter. It saves an interference
	  counter for each source of interference. The interference counter for
	  NMI, IRQs, SoftIRQs, and threads is increased any time the tool
	  observes these interferences' entry events. When a noise happens
	  without any interference from the operating system level, the
	  hardware noise counter increases, pointing to a hardware-related
	  noise. In this way, osnoise can account for any source of
	  interference. At the end of the period, the osnoise tracer prints
	  the sum of all noise, the max single noise, the percentage of CPU
	  available for the thread, and the counters for the noise sources.

	  In addition to the tracer, a set of tracepoints were added to
	  facilitate the identification of the osnoise source.

	  The output will appear in the trace and trace_pipe files.

	  To enable this tracer, echo "osnoise" into the current_tracer
	  file.
config TIMERLAT_TRACER
	bool "Timerlat tracer"
	select OSNOISE_TRACER
	select GENERIC_TRACER
	help
	  The timerlat tracer aims to help preemptive kernel developers
	  find sources of wakeup latencies of real-time threads.

	  The tracer creates a per-cpu kernel thread with real-time priority.
	  The tracer thread sets a periodic timer to wake itself up, and goes
	  to sleep waiting for the timer to fire. At the wakeup, the thread
	  then computes a wakeup latency value as the difference between
	  the current time and the absolute time that the timer was set
	  to expire.

	  The tracer prints two lines at every activation. The first is the
	  timer latency observed at the hardirq context before the
	  activation of the thread. The second is the timer latency observed
	  by the thread, which is the same level that cyclictest reports. The
	  ACTIVATION ID field serves to relate the irq execution to its
	  respective thread execution.

	  The tracer is built on top of the osnoise tracer, and the osnoise:
	  events can be used to trace the source of interference from NMI,
	  IRQs and other threads. It also enables the capture of the
	  stacktrace at the IRQ context, which helps to identify the code
	  path that can cause thread delay.

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

config TRACE_SYSCALL_BUF_SIZE_DEFAULT
	int "System call user read max size"
	range 0 165
	default 63
	depends on FTRACE_SYSCALLS
	help
	  Some system call trace events will record the data from a user
	  space address that one of the parameters points to. The amount of
	  data per event is limited. That limit is set by this config, and
	  this config also affects how much user space data perf can read.

	  For a tracing instance, this size may be changed by writing into
	  its syscall_user_buf_size file.
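#
# Illustrative syscall-event usage (assumes FTRACE_SYSCALLS above is enabled;
# the chosen syscall is only an example):
#
#	echo 1 > /sys/kernel/tracing/events/syscalls/sys_enter_openat/enable
#	cat /sys/kernel/tracing/trace_pipe
#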
config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using
	  the ftrace interface, e.g.:

	      echo 1 > /sys/kernel/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/tracing/per_cpu/cpu2/snapshot

	  After which, only the tracing buffer for CPU 2 is swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  The branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded whether it was hit or missed.
	  The results will be displayed in:

	  /sys/kernel/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.
config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/tracing/current_tracer
	    cat /sys/kernel/tracing/trace_pipe

	  If unsure, say N.

config FPROBE_EVENTS
	depends on FPROBE
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable fprobe-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events on function entry and
	  exit via the ftrace interface. The syntax is the same as for kprobe
	  events, and kprobe events on function entry and exit are
	  transparently converted to fprobe events.

config PROBE_EVENTS_BTF_ARGS
	depends on HAVE_FUNCTION_ARG_ACCESS_API
	depends on FPROBE_EVENTS || KPROBE_EVENTS
	depends on DEBUG_INFO_BTF && BPF_SYSCALL
	bool "Support BTF function arguments for probe events"
	default y
	help
	  The user can specify the arguments of the probe event using the names
	  of the arguments of the probed function, when the probe location is a
	  kernel function entry or a tracepoint.
	  This is available only if BTF (BPF Type Format) support is enabled.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.

config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace functions from kprobe events"
	depends on KPROBE_EVENTS
	depends on DYNAMIC_FTRACE
	default n
	help
	  This is only for developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of a breakpoint, ftrace-related
	  functions are protected from kprobe events to prevent an infinite
	  recursion or any unexpected execution path which leads to a kernel
	  crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace by itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.
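#
# Illustrative kprobe-event usage (syntax per Documentation/trace/kprobetrace.rst;
# the probe name and probed symbol are only examples):
#
#	echo 'p:my_probe vfs_read' >> /sys/kernel/tracing/kprobe_events
#	echo 1 > /sys/kernel/tracing/events/kprobes/my_probe/enable
#	cat /sys/kernel/tracing/trace_pipe
#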
config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe subcommand
	  of perf tools on user space applications.

config EPROBE_EVENTS
	bool "Enable event-based dynamic events"
	depends on TRACING
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  Eprobes are dynamic events that can be placed on other existing
	  events. They can be used to limit what fields are recorded in
	  an event or even dereference a field of an event. They can also
	  convert the type of an event field. For example, turn an
	  address into a string.

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe, uprobe, and
	  tracepoint events.

config DYNAMIC_EVENTS
	def_bool n

config PROBE_EVENTS
	def_bool n

config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	  Allows BPF to override the execution of a probed function and
	  set a different return value. This is used for error injection.

config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	bool
	depends on DYNAMIC_FTRACE

config FTRACE_MCOUNT_USE_CC
	def_bool y
	depends on $(cc-option,-mrecord-mcount)
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on DYNAMIC_FTRACE

config FTRACE_MCOUNT_USE_OBJTOOL
	def_bool y
	depends on HAVE_OBJTOOL_MCOUNT
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on DYNAMIC_FTRACE
	select OBJTOOL

config FTRACE_MCOUNT_USE_RECORDMCOUNT
	def_bool y
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on !FTRACE_MCOUNT_USE_OBJTOOL
	depends on DYNAMIC_FTRACE

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers. It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config SYNTH_EVENTS
	bool "Synthetic trace events"
	select TRACING
	select DYNAMIC_EVENTS
	default n
	help
	  Synthetic events are user-defined trace events that can be
	  used to combine data from other trace events or in fact any
	  data source. Synthetic events can be generated indirectly
	  via the trace() action of histogram triggers or directly
	  by way of an in-kernel API.

	  See Documentation/trace/events.rst or
	  Documentation/trace/histogram.rst for details and examples.

	  If in doubt, say N.
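#
# Illustrative synthetic event definition (syntax per
# Documentation/trace/histogram.rst; the event name and fields are examples):
#
#	echo 'wakeup_latency u64 lat; pid_t pid' >> \
#		/sys/kernel/tracing/synthetic_events
#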
config USER_EVENTS
	bool "User trace events"
	select TRACING
	select DYNAMIC_EVENTS
	help
	  User trace events are user-defined trace events that
	  can be used like an existing kernel trace event. User trace
	  events are generated by writing to a tracefs file. User
	  processes can determine if their tracing events should be
	  generated by registering a value and bit with the kernel
	  that reflects when it is enabled or not.

	  See Documentation/trace/user_events.rst.
	  If in doubt, say N.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	select DYNAMIC_EVENTS
	select SYNTH_EVENTS
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file. They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.

	  See Documentation/trace/histogram.rst.
	  If in doubt, say N.
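#
# Illustrative hist trigger usage (syntax per Documentation/trace/histogram.rst;
# the chosen event and key are only examples):
#
#	echo 'hist:keys=common_pid' >> \
#		/sys/kernel/tracing/events/sched/sched_switch/trigger
#	cat /sys/kernel/tracing/events/sched/sched_switch/hist
#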
config TRACE_EVENT_INJECT
	bool "Trace event injection"
	depends on TRACING
	help
	  Allow user-space to inject a specific trace event into the ring
	  buffer. This is mainly used for testing purposes.

	  If unsure, say N.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_resched() to let other tasks
	  run), and calls the tracepoint. Each iteration will record the time
	  it took to write to the tracepoint and the next iteration that
	  data will be passed to the tracepoint itself. That is, the tracepoint
	  will report the time it took to do the previous tracepoint.
	  The string written to the tracepoint is a static string of 128 bytes
	  to keep the time the same. The initial string is simply a write of
	  "START". The second string records the cold cache time of the first
	  write, which is not added to the rest of the calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine because
	  we care most about hot paths that are probably in cache already.

	  An example of the output:

	    START
	    first=3672 [COLD CACHED]
	    last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	    last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	    last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	    last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	    last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	    last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space tools
	  that use this string to parse the raw data, as user space does not
	  know how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value. If this macro is used, then
	  the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeof the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong to.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed, as
	  they are needed for the "eval_map" file. Enabling this option will
	  increase the memory footprint of the running kernel.

	  If unsure, say N.

config FTRACE_RECORD_RECURSION
	bool "Record functions that recurse in function tracing"
	depends on FUNCTION_TRACER
	help
	  All callbacks that attach to the function tracing have some sort
	  of protection against recursion. Even though the protection exists,
	  it adds overhead. This option will create a file in the tracefs
	  file system called "recursed_functions" that will list the functions
	  that triggered a recursion.

	  This will add more overhead to cases that have recursion.

	  If unsure, say N.

config FTRACE_RECORD_RECURSION_SIZE
	int "Max number of recursed functions to record"
	default 128
	depends on FTRACE_RECORD_RECURSION
	help
	  This defines the limit of the number of functions that can be
	  listed in the "recursed_functions" file, which lists all
	  the functions that caused a recursion to happen.
	  This file can be reset, but the limit can not change in
	  size at runtime.

config FTRACE_VALIDATE_RCU_IS_WATCHING
	bool "Validate RCU is on during ftrace execution"
	depends on FUNCTION_TRACER
	depends on ARCH_WANTS_NO_INSTR
	help
	  All callbacks that attach to the function tracing have some sort of
	  protection against recursion. This option is only to verify that
	  ftrace (and other users of ftrace_test_recursion_trylock()) are not
	  called outside of RCU, as if they are, it can cause a race. But it
	  also has a noticeable overhead when enabled.

	  If unsure, say N.

config RING_BUFFER_RECORD_RECURSION
	bool "Record functions that recurse in the ring buffer"
	depends on FTRACE_RECORD_RECURSION
	# default y, because it is coupled with FTRACE_RECORD_RECURSION
	default y
	help
	  The ring buffer has its own internal recursion checking. Although
	  recursion won't cause harm because of this protection, it does add
	  unwanted overhead. Enabling this option will record the places where
	  recursion was detected in the ftrace "recursed_functions" file.

	  This will add more overhead to cases that have recursion.
config GCOV_PROFILE_FTRACE
	bool "Enable GCOV profiling on ftrace subsystem"
	depends on GCOV_KERNEL
	help
	  Enable GCOV profiling on the ftrace subsystem for checking
	  which functions/lines are tested.

	  If unsure, say N.

	  Note that on a kernel compiled with this config, ftrace will
	  run significantly slower.

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_STARTUP_TEST
	bool "Run selftest on trace events"
	depends on FTRACE_STARTUP_TEST
	default y
	help
	  This option performs a test on all trace events in the system.
	  It basically just enables each event and runs some code that
	  will trigger events (not necessarily the event it enables).
	  This may take some time to run as there are a lot of events.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on EVENT_TRACE_STARTUP_TEST
	help
	  This option will also enable testing every syscall event.
	  It only enables the event and disables it and runs various loads
	  with the event enabled. This adds a bit more time for kernel boot
	  up since it runs this on every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	  events

config FTRACE_SORT_STARTUP_TEST
	bool "Verify compile time sorting of ftrace functions"
	depends on DYNAMIC_FTRACE
	depends on BUILDTIME_MCOUNT_SORT
	help
	  Sorting of the mcount_loc sections, which ftrace uses to know
	  where to patch functions for tracing and other callbacks, is
	  done at compile time. But if the sort is not done correctly, it
	  will cause non-deterministic failures.
	  When this is set, the sorted sections will be verified to be
	  indeed sorted, and a warning will be issued if they are not.

	  If unsure, say N.

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test will start that kicks off
	  a thread per cpu. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per cpu buffer: What
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N.
config RING_BUFFER_VALIDATE_TIME_DELTAS
	bool "Verify ring buffer time stamp deltas"
	depends on RING_BUFFER
	help
	  This will audit the time stamps on the ring buffer sub
	  buffer to make sure that all the time deltas for the
	  events on a sub buffer match the current time stamp.
	  This audit is performed for every event that is not
	  interrupted, or interrupting another event. A check
	  is also made when traversing sub buffers to make sure
	  that all the deltas on the previous sub buffer do not
	  add up to be greater than the current time stamp.

	  NOTE: This adds significant overhead to recording of events,
	  and should only be used to test the logic of the ring buffer.
	  Do not use it on production systems.

	  Only say Y if you understand what this does, and you
	  still want it enabled. Otherwise say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config PREEMPTIRQ_DELAY_TEST
	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
	depends on m
	help
	  Select this option to build a test module that can help test latency
	  tracers by executing a preempt or irq disable section with a user
	  configurable delay. The module busy waits for the duration of the
	  critical section.

	  For example, the following invocation generates a burst of three
	  irq-disabled critical sections for 500us:
	  modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3

	  What's more, if you want to attach the test to the cpu which the
	  latency tracer is running on, specify cpu_affinity=cpu_num at the
	  end of the command.

	  If unsure, say N.

config SYNTH_EVENT_GEN_TEST
	tristate "Test module for in-kernel synthetic event generation"
	depends on SYNTH_EVENTS && m
	help
	  This option creates a test module to check the base
	  functionality of in-kernel synthetic event definition and
	  generation.

	  To test, insert the module, and then check the trace buffer
	  for the generated sample events.

	  If unsure, say N.

config KPROBE_EVENT_GEN_TEST
	tristate "Test module for in-kernel kprobe event generation"
	depends on KPROBE_EVENTS && m
	help
	  This option creates a test module to check the base
	  functionality of in-kernel kprobe event definition.

	  To test, insert the module, and then check the trace buffer
	  for the generated kprobe events.

	  If unsure, say N.

config HIST_TRIGGERS_DEBUG
	bool "Hist trigger debug support"
	depends on HIST_TRIGGERS
	help
	  Add a "hist_debug" file for each event, which when read will
	  dump out a bunch of internal details about the hist triggers
	  defined on that event.

	  The hist_debug file serves a couple of purposes:

	    - Helps developers verify that nothing is broken.

	    - Provides educational information to support the details
	      of the hist trigger internals as described by
	      Documentation/trace/histogram-design.rst.
	  The hist_debug output only covers the data structures
	  related to the histogram definitions themselves and doesn't
	  display the internals of map buckets or variable values of
	  running histograms.

	  If unsure, say N.

source "kernel/trace/rv/Kconfig"

endif # FTRACE