# SPDX-License-Identifier: GPL-2.0-only
#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_RETHOOK
	bool

config RETHOOK
	bool
	depends on HAVE_RETHOOK
	help
	  Enable generic return hooking feature. This is an internal
	  API, which will be used by other function-entry hooking
	  features like fprobe and kprobes.

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_FREGS
	bool

config HAVE_FTRACE_GRAPH_FUNC
	bool
	help
	  True if ftrace_graph_func() is defined.

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
	bool

config HAVE_EXTRA_IPI_TRACEPOINTS
	bool
	help
	  For architectures that use the ipi_raise, ipi_entry and ipi_exit
	  tracepoints.

config HAVE_DYNAMIC_FTRACE_WITH_ARGS
	bool
	help
	  If this is set, then arguments and stack can be found from
	  the ftrace_regs passed into the function callback regs parameter
	  by default, even without setting the REGS flag in the ftrace_ops.
	  This allows for use of ftrace_regs_get_argument() and
	  ftrace_regs_get_stack_pointer().

config HAVE_FTRACE_REGS_HAVING_PT_REGS
	bool
	help
	  If this is set, ftrace_regs has pt_regs, thus it can be converted
	  to pt_regs without allocating memory.

config HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
	bool
	help
	  If the architecture generates __patchable_function_entries sections
	  but does not want them included in the ftrace locations.

config HAVE_DYNAMIC_FTRACE_WITH_JMP
	bool
	help
	  If the architecture supports replacing __fentry__ with a
	  "jmp" instruction.

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_NOP_MCOUNT
	bool
	help
	  Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount

config HAVE_OBJTOOL_MCOUNT
	bool
	help
	  Arch supports objtool --mcount

config HAVE_OBJTOOL_NOP_MCOUNT
	bool
	help
	  Arch supports the objtool options --mcount with --mnop.
	  An architecture can select this if it wants to enable nop'ing
	  of ftrace locations.

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config HAVE_BUILDTIME_MCOUNT_SORT
	bool
	help
	  An architecture selects this if it sorts the mcount_loc section
	  at build time.

config BUILDTIME_MCOUNT_SORT
	bool
	default y
	depends on HAVE_BUILDTIME_MCOUNT_SORT && DYNAMIC_FTRACE
	help
	  Sort the mcount_loc section at build time.

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

config PREEMPTIRQ_TRACEPOINTS
	bool
	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
	select TRACING
	default y
	help
	  Create preempt/irq toggle tracepoints if needed, so that other parts
	  of the kernel can use them to generate or add hooks to them.

# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects it. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
# hiding of the automatic options.

config TRACING
	bool
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK
	select NEED_TASKS_RCU

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

menuconfig FTRACE
	bool "Tracers"
	depends on TRACING_SUPPORT
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config TRACEFS_AUTOMOUNT_DEPRECATED
	bool "Automount tracefs on debugfs [DEPRECATED]"
	depends on TRACING
	default y
	help
	  The tracing interface was moved from /sys/kernel/debug/tracing
	  to /sys/kernel/tracing in 2015, but the tracing file system
	  was still automounted in /sys/kernel/debug for backward
	  compatibility with tooling.

	  The new interface has been around for more than 10 years and
	  the old debug mount will soon be removed.

config BOOTTIME_TRACING
	bool "Boot-time Tracing support"
	depends on TRACING
	select BOOT_CONFIG
	help
	  Enable developers to set up the ftrace subsystem via a supplemental
	  kernel command line at boot time for debugging (tracing) driver
	  initialization and the boot process.

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select NEED_TASKS_RCU
	select TASKS_RUDE_RCU
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function, which NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks (at least on
	  x86, but may have impact on other architectures).

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its return
	  and its entry.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread with some information like
	  the return value. This is done by setting the current return
	  address on the current task structure into a stack of calls.
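
	  For example, assuming tracefs is mounted at /sys/kernel/tracing, the
	  graph tracer can be selected at run time with:

	    echo function_graph > /sys/kernel/tracing/current_tracer
	    cat /sys/kernel/tracing/trace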

config FUNCTION_GRAPH_RETVAL
	bool "Kernel Function Graph Return Value"
	depends on HAVE_FUNCTION_GRAPH_FREGS
	depends on FUNCTION_GRAPH_TRACER
	default n
	help
	  Support recording and printing the function return value when
	  using the function graph tracer. It can be helpful to locate
	  functions that return errors. This feature is off by default, and
	  you can enable it via the trace option funcgraph-retval.
	  See Documentation/trace/ftrace.rst

config FUNCTION_GRAPH_RETADDR
	bool "Kernel Function Graph Return Address"
	depends on FUNCTION_GRAPH_TRACER
	default n
	help
	  Support recording and printing the function return address when
	  using the function graph tracer. It can be helpful to locate the
	  code line from which the function is called. This feature is off by
	  default, and you can enable it via the trace option funcgraph-retaddr.

config FUNCTION_TRACE_ARGS
	bool
	depends on PROBE_EVENTS_BTF_ARGS
	default y
	help
	  If supported with the function argument access API and BTF, then
	  the function tracer and function graph tracer will support printing
	  of function arguments. This feature is off by default, and can be
	  enabled via the trace option func-args (for the function tracer) and
	  funcgraph-args (for the function graph tracer).

config DYNAMIC_FTRACE
	bool
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect
	  performance of the system.

	  See the files in /sys/kernel/tracing:

	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.
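
	  For example, assuming tracefs is mounted at /sys/kernel/tracing (the
	  'vfs_*' glob below is only an illustration), tracing can be limited
	  to a subset of functions before the function tracer is enabled:

	    echo 'vfs_*' > /sys/kernel/tracing/set_ftrace_filter
	    echo function > /sys/kernel/tracing/current_tracer
	    cat /sys/kernel/tracing/trace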

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	def_bool y
	depends on DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS
	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config DYNAMIC_FTRACE_WITH_CALL_OPS
	def_bool y
	depends on HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS

config DYNAMIC_FTRACE_WITH_ARGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS

config DYNAMIC_FTRACE_WITH_JMP
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	depends on HAVE_DYNAMIC_FTRACE_WITH_JMP

config FPROBE
	bool "Kernel Function Probe (fprobe)"
	depends on HAVE_FUNCTION_GRAPH_FREGS && HAVE_FTRACE_GRAPH_FUNC
	depends on DYNAMIC_FTRACE_WITH_ARGS
	select FUNCTION_GRAPH_TRACER
	default n
	help
	  This option enables the kernel function probe (fprobe) based on
	  ftrace. The fprobe is similar to kprobes, but probes only kernel
	  function entries and exits. It can also probe multiple functions
	  with a single fprobe.

	  If unsure, say N.

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled

	  Say N if unsure.

config TRACE_PREEMPT_TOGGLE
	bool
	help
	  Enables hooks which will be called when preemption is first disabled,
	  and last enabled.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on PREEMPTION
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	select TRACE_PREEMPT_TOGGLE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.
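
	  For example, assuming tracefs is mounted at /sys/kernel/tracing, the
	  wakeup latency of the highest priority real-time task can be
	  measured with:

	    echo wakeup_rt > /sys/kernel/tracing/current_tracer
	    cat /sys/kernel/tracing/tracing_max_latency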

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  This tracer, when enabled, will create one or more kernel threads,
	  depending on what the cpumask file is set to, with each thread
	  spinning in a loop looking for interruptions caused by
	  something other than the kernel. For example, if a
	  System Management Interrupt (SMI) takes a noticeable amount of
	  time, this tracer will detect it. This is useful for testing
	  if a system is reliable for Real Time tasks.

	  Some files are created in the tracing directory when this
	  is enabled:

	    hwlat_detector/width  - time in usecs for how long to spin for
	    hwlat_detector/window - time in usecs between the start of each
				    iteration

	  A kernel thread is created that will spin with interrupts disabled
	  for "width" microseconds in every "window" cycle. It will not spin
	  for "window - width" microseconds, where the system can
	  continue to operate.

	  The output will appear in the trace and trace_pipe files.

	  When the tracer is not running, it has no effect on the system,
	  but when it is running, it can cause the system to be
	  periodically non responsive. Do not run this tracer on a
	  production system.

	  To enable this tracer, echo "hwlat" into the current_tracer
	  file. Every time a latency is greater than tracing_thresh, it will
	  be recorded into the ring buffer.

config OSNOISE_TRACER
	bool "OS Noise tracer"
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  In the context of high-performance computing (HPC), Operating
	  System Noise (osnoise) refers to the interference experienced by an
	  application due to activities inside the operating system. In the
	  context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
	  can cause noise to the system. Moreover, hardware-related jobs can
	  also cause noise, for example, via SMIs.

	  The osnoise tracer leverages the hwlat_detector by running a similar
	  loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
	  the sources of osnoise during its execution. The osnoise tracer takes
	  note of the entry and exit point of any source of interference,
	  increasing a per-cpu interference counter. It saves an interference
	  counter for each source of interference. The interference counter for
	  NMI, IRQs, SoftIRQs, and threads is increased any time the tool
	  observes these interferences' entry events. When a noise happens
	  without any interference from the operating system level, the
	  hardware noise counter increases, pointing to a hardware-related
	  noise. In this way, osnoise can account for any source of
	  interference. At the end of the period, the osnoise tracer prints
	  the sum of all noise, the max single noise, the percentage of CPU
	  available for the thread, and the counters for the noise sources.

	  In addition to the tracer, a set of tracepoints were added to
	  facilitate the identification of the osnoise source.

	  The output will appear in the trace and trace_pipe files.

	  To enable this tracer, echo "osnoise" into the current_tracer
	  file.

config TIMERLAT_TRACER
	bool "Timerlat tracer"
	select OSNOISE_TRACER
	select GENERIC_TRACER
	help
	  The timerlat tracer aims to help preemptive kernel developers
	  find sources of wakeup latencies of real-time threads.

	  The tracer creates a per-cpu kernel thread with real-time priority.
	  The tracer thread sets a periodic timer to wake itself up, and goes
	  to sleep waiting for the timer to fire. At the wakeup, the thread
	  then computes a wakeup latency value as the difference between
	  the current time and the absolute time that the timer was set
	  to expire.

	  The tracer prints two lines at every activation. The first is the
	  timer latency observed at the hardirq context before the
	  activation of the thread. The second is the timer latency observed
	  by the thread, which is the same level that cyclictest reports. The
	  ACTIVATION ID field serves to relate the irq execution to its
	  respective thread execution.

	  The tracer is built on top of the osnoise tracer, and the osnoise:
	  events can be used to trace the source of interference from NMI,
	  IRQs and other threads. It also enables the capture of the
	  stacktrace at the IRQ context, which helps to identify the code
	  path that can cause thread delay.
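
	  For example, assuming tracefs is mounted at /sys/kernel/tracing, the
	  tracer can be started with:

	    echo timerlat > /sys/kernel/tracing/current_tracer
	    cat /sys/kernel/tracing/trace_pipe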

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.
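
	  For example, assuming tracefs is mounted at /sys/kernel/tracing (the
	  openat syscall is only an illustration), a single syscall event can
	  be enabled with:

	    echo 1 > /sys/kernel/tracing/events/syscalls/sys_enter_openat/enable
	    cat /sys/kernel/tracing/trace_pipe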

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using
	  the ftrace interface, e.g.:

	    echo 1 > /sys/kernel/tracing/snapshot
	    cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	    echo 1 > /sys/kernel/tracing/per_cpu/cpu2/snapshot

	  After which, only the tracing buffer for CPU 2 is swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  The branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	    /sys/kernel/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if () in the
	  kernel is recorded, whether the branch was taken or missed.
	  The results will be displayed in:

	    /sys/kernel/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	    git://git.kernel.dk/blktrace.git
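
	  With the blktrace tools installed, a queue can be traced from user
	  space, for example (the device name is only an illustration):

	    blktrace -d /dev/sda -o - | blkparse -i -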

	  Tracing also is possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/tracing/current_tracer
	    cat /sys/kernel/tracing/trace_pipe

	  If unsure, say N.

config FPROBE_EVENTS
	depends on FPROBE
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable fprobe-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events on function entry and
	  exit via the ftrace interface. The syntax is the same as for kprobe
	  events, and kprobe events on function entry and exit are
	  transparently converted to fprobe events.

config PROBE_EVENTS_BTF_ARGS
	depends on HAVE_FUNCTION_ARG_ACCESS_API
	depends on FPROBE_EVENTS || KPROBE_EVENTS
	depends on DEBUG_INFO_BTF && BPF_SYSCALL
	bool "Support BTF function arguments for probe events"
	default y
	help
	  The user can specify the arguments of the probe event using the names
	  of the arguments of the probed function, when the probe location is a
	  kernel function entry or a tracepoint.
	  This is available only if BTF (BPF Type Format) support is enabled.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.

config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace function from kprobe events"
	depends on KPROBE_EVENTS
	depends on DYNAMIC_FTRACE
	default n
	help
	  This is only for the developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of breakpoint, ftrace related
	  functions are protected from kprobe-events to prevent an infinite
	  recursion or any unexpected execution path which leads to a kernel
	  crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace by itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe subcommand
	  of perf tools on user space applications.

config EPROBE_EVENTS
	bool "Enable event-based dynamic events"
	depends on TRACING
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  Eprobes are dynamic events that can be placed on other existing
	  events. They can be used to limit what fields are recorded in
	  an event or even dereference a field of an event. They can also
	  convert the type of an event field. For example, turn an
	  address into a string.
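
	  A minimal illustration, assuming tracefs is mounted at
	  /sys/kernel/tracing (the probed event and fetch argument below are
	  only examples):

	    echo 'e:myopen syscalls/sys_enter_openat file=+0($filename):ustring' >> /sys/kernel/tracing/dynamic_events
	    echo 1 > /sys/kernel/tracing/events/eprobes/myopen/enable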

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe, uprobe, and
	  tracepoint events.

config DYNAMIC_EVENTS
	def_bool n

config PROBE_EVENTS
	def_bool n

config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	  Allows BPF to override the execution of a probed function and
	  set a different return value. This is used for error injection.

config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	bool
	depends on DYNAMIC_FTRACE

config FTRACE_MCOUNT_USE_CC
	def_bool y
	depends on $(cc-option,-mrecord-mcount)
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on DYNAMIC_FTRACE

config FTRACE_MCOUNT_USE_OBJTOOL
	def_bool y
	depends on HAVE_OBJTOOL_MCOUNT
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on DYNAMIC_FTRACE
	select OBJTOOL

config FTRACE_MCOUNT_USE_RECORDMCOUNT
	def_bool y
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on !FTRACE_MCOUNT_USE_OBJTOOL
	depends on DYNAMIC_FTRACE

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers. It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config SYNTH_EVENTS
	bool "Synthetic trace events"
	select TRACING
	select DYNAMIC_EVENTS
	default n
	help
	  Synthetic events are user-defined trace events that can be
	  used to combine data from other trace events or in fact any
	  data source. Synthetic events can be generated indirectly
	  via the trace() action of histogram triggers or directly
	  by way of an in-kernel API.

	  See Documentation/trace/events.rst or
	  Documentation/trace/histogram.rst for details and examples.

	  If in doubt, say N.

config USER_EVENTS
	bool "User trace events"
	select TRACING
	select DYNAMIC_EVENTS
	help
	  User trace events are user-defined trace events that
	  can be used like an existing kernel trace event. User trace
	  events are generated by writing to a tracefs file. User
	  processes can determine if their tracing events should be
	  generated by registering a value and bit with the kernel
	  that reflects when it is enabled or not.

	  See Documentation/trace/user_events.rst.
	  If in doubt, say N.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	select DYNAMIC_EVENTS
	select SYNTH_EVENTS
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file. They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.
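
	  For example, assuming tracefs is mounted at /sys/kernel/tracing, a
	  histogram of kmalloc request sizes keyed by call site can be set
	  up and read back with:

	    echo 'hist:keys=call_site:vals=bytes_req' >> /sys/kernel/tracing/events/kmem/kmalloc/trigger
	    cat /sys/kernel/tracing/events/kmem/kmalloc/hist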

	  See Documentation/trace/histogram.rst.
	  If in doubt, say N.

config TRACE_EVENT_INJECT
	bool "Trace event injection"
	depends on TRACING
	help
	  Allow user-space to inject a specific trace event into the ring
	  buffer. This is mainly used for testing purposes.

	  If unsure, say N.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_resched() to let other tasks
	  run), and calls the tracepoint. Each iteration will record the time
	  it took to write to the tracepoint, and on the next iteration that
	  data will be passed to the tracepoint itself. That is, the tracepoint
	  will report the time it took to do the previous tracepoint.
	  The string written to the tracepoint is a static string of 128 bytes
	  to keep the time the same. The initial string is simply a write of
	  "START". The second string records the cold cache time of the first
	  write which is not added to the rest of the calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine because
	  we care most about hot paths that are probably in cache already.

	  An example of the output:

	    START
	    first=3672 [COLD CACHED]
	    last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	    last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	    last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	    last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	    last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	    last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space tools
	  that use this string to parse the raw data as user space does not know
	  how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value. If this macro is used, then
	  the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeof the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong to.
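
	  Assuming tracefs is mounted at /sys/kernel/tracing, the mappings
	  can then be inspected with:

	    cat /sys/kernel/tracing/eval_map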

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed, as
	  they are needed for the "eval_map" file. Enabling this option will
	  increase the memory footprint of the running kernel.

	  If unsure, say N.

config FTRACE_RECORD_RECURSION
	bool "Record functions that recurse in function tracing"
	depends on FUNCTION_TRACER
	help
	  All callbacks that attach to the function tracing have some sort
	  of protection against recursion. Even though the protection exists,
	  it adds overhead. This option will create a file in the tracefs
	  file system called "recursed_functions" that will list the functions
	  that triggered a recursion.

	  This will add more overhead to cases that have recursion.

	  If unsure, say N

config FTRACE_RECORD_RECURSION_SIZE
	int "Max number of recursed functions to record"
	default 128
	depends on FTRACE_RECORD_RECURSION
	help
	  This defines the limit of the number of functions that can be
	  listed in the "recursed_functions" file, which lists all
	  the functions that caused a recursion to happen.
	  This file can be reset, but the limit cannot be changed
	  at runtime.

config FTRACE_VALIDATE_RCU_IS_WATCHING
	bool "Validate RCU is on during ftrace execution"
	depends on FUNCTION_TRACER
	depends on ARCH_WANTS_NO_INSTR
	help
	  All callbacks that attach to the function tracing have some sort of
	  protection against recursion. This option is only to verify that
	  ftrace (and other users of ftrace_test_recursion_trylock()) are not
	  called outside of RCU, as if they are, it can cause a race. But it
	  also has a noticeable overhead when enabled.

	  If unsure, say N

config RING_BUFFER_RECORD_RECURSION
	bool "Record functions that recurse in the ring buffer"
	depends on FTRACE_RECORD_RECURSION
	# default y, because it is coupled with FTRACE_RECORD_RECURSION
	default y
	help
	  The ring buffer has its own internal recursion. Although recursion
	  causes no harm thanks to that protection, it does cause unwanted
	  overhead. Enabling this option will record the places where
	  recursion was detected in the ftrace "recursed_functions" file.

	  This will add more overhead to cases that have recursion.

config GCOV_PROFILE_FTRACE
	bool "Enable GCOV profiling on ftrace subsystem"
	depends on GCOV_KERNEL
	help
	  Enable GCOV profiling on the ftrace subsystem for checking
	  which functions/lines are tested.

	  If unsure, say N.

	  Note that on a kernel compiled with this config, ftrace will
	  run significantly slower.

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_STARTUP_TEST
	bool "Run selftest on trace events"
	depends on FTRACE_STARTUP_TEST
	default y
	help
	  This option performs a test on all trace events in the system.
	  It basically just enables each event and runs some code that
	  will trigger events (not necessarily the event it enables).
	  This may take some time to run as there are a lot of events.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on EVENT_TRACE_STARTUP_TEST
	help
	  This option will also enable testing every syscall event.
	  It only enables each event, runs various loads with the event
	  enabled, and then disables it. This adds a bit more time for
	  kernel boot up since it runs this on every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	  events

config FTRACE_SORT_STARTUP_TEST
	bool "Verify compile time sorting of ftrace functions"
	depends on DYNAMIC_FTRACE
	depends on BUILDTIME_MCOUNT_SORT
	help
	  Sorting of the mcount_loc section, which ftrace uses to know where
	  to patch functions for tracing and other callbacks, is done at
	  compile time. But if the sort is not done correctly, it will cause
	  non-deterministic failures. When this is set, the sorted sections
	  will be verified to be indeed sorted and a warning will be issued
	  if they are not.

	  If unsure, say N

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test will start that kicks off
	  a thread per cpu. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per cpu buffer: what
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N

config RING_BUFFER_VALIDATE_TIME_DELTAS
	bool "Verify ring buffer time stamp deltas"
	depends on RING_BUFFER
	help
	  This will audit the time stamps on the ring buffer sub
	  buffer to make sure that all the time deltas for the
	  events on a sub buffer match the current time stamp.
	  This audit is performed for every event that is not
	  interrupted, or interrupting another event. A check
	  is also made when traversing sub buffers to make sure
	  that all the deltas on the previous sub buffer do not
	  add up to be greater than the current time stamp.

	  NOTE: This adds significant overhead to recording of events,
	  and should only be used to test the logic of the ring buffer.
	  Do not use it on production systems.

	  Only say Y if you understand what this does, and you
	  still want it enabled. Otherwise say N

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config PREEMPTIRQ_DELAY_TEST
	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
	depends on m
	help
	  Select this option to build a test module that can help test latency
	  tracers by executing a preempt or irq disable section with a user
	  configurable delay. The module busy waits for the duration of the
	  critical section.

	  For example, the following invocation generates a burst of three
	  irq-disabled critical sections for 500us:

	    modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3

	  What's more, if you want to attach the test to the CPU on which the
	  latency tracer is running, specify cpu_affinity=cpu_num at the end
	  of the command.

	  If unsure, say N

config SYNTH_EVENT_GEN_TEST
	tristate "Test module for in-kernel synthetic event generation"
	depends on SYNTH_EVENTS && m
	help
	  This option creates a test module to check the base
	  functionality of in-kernel synthetic event definition and
	  generation.

	  To test, insert the module, and then check the trace buffer
	  for the generated sample events.

	  If unsure, say N.

config KPROBE_EVENT_GEN_TEST
	tristate "Test module for in-kernel kprobe event generation"
	depends on KPROBE_EVENTS && m
	help
	  This option creates a test module to check the base
	  functionality of in-kernel kprobe event definition.

	  To test, insert the module, and then check the trace buffer
	  for the generated kprobe events.

	  If unsure, say N.

config HIST_TRIGGERS_DEBUG
	bool "Hist trigger debug support"
	depends on HIST_TRIGGERS
	help
	  Add a "hist_debug" file for each event, which when read will
	  dump out a bunch of internal details about the hist triggers
	  defined on that event.

	  The hist_debug file serves a couple of purposes:

	    - Helps developers verify that nothing is broken.

	    - Provides educational information to support the details
	      of the hist trigger internals as described by
	      Documentation/trace/histogram-design.rst.

	  The hist_debug output only covers the data structures
	  related to the histogram definitions themselves and doesn't
	  display the internals of map buckets or variable values of
	  running histograms.

	  If unsure, say N.

source "kernel/trace/rv/Kconfig"

endif # FTRACE