xref: /linux/kernel/trace/Kconfig (revision f93a7d0caccd6ab76dacfd620013cfc41f49fb8d)
1# SPDX-License-Identifier: GPL-2.0-only
2#
3# Architectures that offer a FUNCTION_TRACER implementation should
4#  select HAVE_FUNCTION_TRACER:
5#
6
7config USER_STACKTRACE_SUPPORT
8	bool
9
10config NOP_TRACER
11	bool
12
13config HAVE_RETHOOK
14	bool
15
16config RETHOOK
17	bool
18	depends on HAVE_RETHOOK
19	help
20	  Enable the generic return hooking feature. This is an internal
21	  API, which will be used by other function-entry hooking
22	  features like fprobe and kprobes.
23
24config HAVE_FUNCTION_TRACER
25	bool
26	help
27	  See Documentation/trace/ftrace-design.rst
28
29config HAVE_FUNCTION_GRAPH_TRACER
30	bool
31	help
32	  See Documentation/trace/ftrace-design.rst
33
34config HAVE_FUNCTION_GRAPH_FREGS
35	bool
36
37config HAVE_FTRACE_GRAPH_FUNC
38	bool
39	help
40	  True if ftrace_graph_func() is defined.
41
42config HAVE_DYNAMIC_FTRACE
43	bool
44	help
45	  See Documentation/trace/ftrace-design.rst
46
47config HAVE_DYNAMIC_FTRACE_WITH_REGS
48	bool
49
50config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
51	bool
52
53config HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
54	bool
55
56config HAVE_EXTRA_IPI_TRACEPOINTS
57	bool
58	help
59	 For architectures that use ipi_raise, ipi_entry and ipi_exit
60	 tracepoints.
61
62config HAVE_DYNAMIC_FTRACE_WITH_ARGS
63	bool
64	help
65	 If this is set, then the function arguments and the stack can be found
66	 in the ftrace_regs passed to the function callback's regs parameter
67	 by default, even without setting the REGS flag in the ftrace_ops.
68	 This allows for use of ftrace_regs_get_argument() and
69	 ftrace_regs_get_stack_pointer().
70
71config HAVE_FTRACE_REGS_HAVING_PT_REGS
72	bool
73	help
74	 If this is set, ftrace_regs contains pt_regs, so it can be converted to
75	 pt_regs without allocating memory.
76
77config HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
78	bool
79	help
80	  If the architecture generates __patchable_function_entries sections
81	  but does not want them included in the ftrace locations.
82
83config HAVE_SYSCALL_TRACEPOINTS
84	bool
85	help
86	  See Documentation/trace/ftrace-design.rst
87
88config HAVE_FENTRY
89	bool
90	help
91	  Arch supports the gcc options -pg with -mfentry
92
93config HAVE_NOP_MCOUNT
94	bool
95	help
96	  Arch supports the gcc options -pg with -mrecord-mcount and -mnop-mcount
97
98config HAVE_OBJTOOL_MCOUNT
99	bool
100	help
101	  Arch supports objtool --mcount
102
103config HAVE_OBJTOOL_NOP_MCOUNT
104	bool
105	help
106	  Arch supports the objtool options --mcount with --mnop.
107	  An architecture can select this if it wants to enable nop'ing
108	  of ftrace locations.
109
110config HAVE_C_RECORDMCOUNT
111	bool
112	help
113	  Set if the C version of recordmcount is available.
114
115config HAVE_BUILDTIME_MCOUNT_SORT
116	bool
117	help
118	  An architecture selects this if it sorts the mcount_loc section
119	  at build time.
120
121config BUILDTIME_MCOUNT_SORT
122	bool
123	default y
124	depends on HAVE_BUILDTIME_MCOUNT_SORT && DYNAMIC_FTRACE
125	help
126	  Sort the mcount_loc section at build time.
127
128config TRACER_MAX_TRACE
129	bool
130
131config TRACE_CLOCK
132	bool
133
134config RING_BUFFER
135	bool
136	select TRACE_CLOCK
137	select IRQ_WORK
138
139config EVENT_TRACING
140	select CONTEXT_SWITCH_TRACER
141	select GLOB
142	bool
143
144config CONTEXT_SWITCH_TRACER
145	bool
146
147config RING_BUFFER_ALLOW_SWAP
148	bool
149	help
150	 Allow the use of ring_buffer_swap_cpu.
151	 Adds a very slight overhead to tracing when enabled.
152
153config PREEMPTIRQ_TRACEPOINTS
154	bool
155	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
156	select TRACING
157	default y
158	help
159	  Create preempt/irq toggle tracepoints if needed, so that other parts
160	  of the kernel can use them to generate events or attach hooks to them.
161
162# All tracer options should select GENERIC_TRACER. Options that are
163# enabled by all tracers (context switch and event tracer) select TRACING
164# instead. This allows those options to appear when no other tracer is
165# selected, but to stay hidden when something else selects them. The two
166# options GENERIC_TRACER and TRACING are needed to avoid circular
167# dependencies while still hiding the automatic options.
168
169config TRACING
170	bool
171	select RING_BUFFER
172	select STACKTRACE if STACKTRACE_SUPPORT
173	select TRACEPOINTS
174	select NOP_TRACER
175	select BINARY_PRINTF
176	select EVENT_TRACING
177	select TRACE_CLOCK
178	select NEED_TASKS_RCU
179
180config GENERIC_TRACER
181	bool
182	select TRACING
183
184#
185# Minimum requirements an architecture has to meet for us to
186# be able to offer generic tracing facilities:
187#
188config TRACING_SUPPORT
189	bool
190	depends on TRACE_IRQFLAGS_SUPPORT
191	depends on STACKTRACE_SUPPORT
192	default y
193
194menuconfig FTRACE
195	bool "Tracers"
196	depends on TRACING_SUPPORT
197	default y if DEBUG_KERNEL
198	help
199	  Enable the kernel tracing infrastructure.
200
201if FTRACE
202
203config TRACEFS_AUTOMOUNT_DEPRECATED
204	bool "Automount tracefs on debugfs [DEPRECATED]"
205	depends on TRACING
206	default y
207	help
208	  The tracing interface was moved from /sys/kernel/debug/tracing
209	  to /sys/kernel/tracing in 2015, but the tracing file system
210	  was still automounted in /sys/kernel/debug for backward
211	  compatibility with tooling.
212
213	  The new interface has been around for more than 10 years and
214	  the old debug mount will soon be removed.
215
216config BOOTTIME_TRACING
217	bool "Boot-time Tracing support"
218	depends on TRACING
219	select BOOT_CONFIG
220	help
221	  Enable developers to set up the ftrace subsystem via a supplemental
222	  kernel command line (bootconfig) at boot time, for debugging (tracing)
223	  driver initialization and the boot process.
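
	  For example, a bootconfig fragment along these lines (a sketch; the
	  exact keys are described in Documentation/trace/boottime-trace.rst)
	  can enable an event and select a tracer at boot:

	      ftrace.event.sched.sched_switch.enable
	      ftrace.tracer = function_graph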
224
225config FUNCTION_TRACER
226	bool "Kernel Function Tracer"
227	depends on HAVE_FUNCTION_TRACER
228	select KALLSYMS
229	select GENERIC_TRACER
230	select CONTEXT_SWITCH_TRACER
231	select GLOB
232	select NEED_TASKS_RCU
233	select TASKS_RUDE_RCU
234	help
235	  Enable the kernel to trace every kernel function. This is done
236	  by using a compiler feature to insert a small, 5-byte No-Operation
237	  instruction at the beginning of every kernel function. This NOP
238	  sequence is then dynamically patched into a tracer call when
239	  tracing is enabled by the administrator. If it is disabled at runtime
240	  (the bootup default), then the overhead of the instructions is very
241	  small and not measurable even in micro-benchmarks (at least on
242	  x86, but it may have an impact on other architectures).
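
	  For example, assuming tracefs is mounted at /sys/kernel/tracing, the
	  function tracer can be enabled with:

	      echo function > /sys/kernel/tracing/current_tracer
	      cat /sys/kernel/tracing/trace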
243
244config FUNCTION_GRAPH_TRACER
245	bool "Kernel Function Graph Tracer"
246	depends on HAVE_FUNCTION_GRAPH_TRACER
247	depends on FUNCTION_TRACER
248	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
249	default y
250	help
251	  Enable the kernel to trace a function at both its return
252	  and its entry.
253	  Its first purpose is to trace the duration of functions and
254	  draw a call graph for each thread with some information like
255	  the return value. This is done by setting the current return
256	  address on the current task structure into a stack of calls.
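
	  For example, assuming tracefs is mounted at /sys/kernel/tracing:

	      echo function_graph > /sys/kernel/tracing/current_tracer
	      cat /sys/kernel/tracing/trace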
257
258config FUNCTION_GRAPH_RETVAL
259	bool "Kernel Function Graph Return Value"
260	depends on HAVE_FUNCTION_GRAPH_FREGS
261	depends on FUNCTION_GRAPH_TRACER
262	default n
263	help
264	  Support recording and printing the function return value when
265	  using the function graph tracer. It can be helpful to locate functions
266	  that return errors. This feature is off by default, and you can
267	  enable it via the trace option funcgraph-retval.
268	  See Documentation/trace/ftrace.rst
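
	  For example, the option can be turned on with:

	      echo funcgraph-retval > /sys/kernel/tracing/trace_options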
269
270config FUNCTION_GRAPH_RETADDR
271	bool "Kernel Function Graph Return Address"
272	depends on FUNCTION_GRAPH_TRACER
273	default n
274	help
275	  Support recording and printing the function return address when
276	  using the function graph tracer. It can be helpful to locate the
277	  code line from which the function is called. This feature is off
278	  by default, and you can enable it via the trace option funcgraph-retaddr.
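
	  For example, the option can be turned on with:

	      echo funcgraph-retaddr > /sys/kernel/tracing/trace_options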
279
280config FUNCTION_TRACE_ARGS
281	bool
282	depends on PROBE_EVENTS_BTF_ARGS
283	default y
284	help
285	  If the function argument access API and BTF are supported, then
286	  the function tracer and function graph tracer will support printing
287	  of function arguments. This feature is off by default, and can be
288	  enabled via the trace option func-args (for the function tracer) and
289	  funcgraph-args (for the function graph tracer).
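
	  For example:

	      echo func-args > /sys/kernel/tracing/trace_options
	      echo funcgraph-args > /sys/kernel/tracing/trace_options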
290
291config DYNAMIC_FTRACE
292	bool
293	depends on FUNCTION_TRACER
294	depends on HAVE_DYNAMIC_FTRACE
295	default y
296	help
297	  This option will modify all the calls to function tracing
298	  dynamically (will patch them out of the binary image and
299	  replace them with a No-Op instruction) on boot up. During
300	  compile time, a table is made of all the locations that ftrace
301	  can function trace, and this table is linked into the kernel
302	  image. When this is enabled, functions can be individually
303	  enabled, and the functions not enabled will not affect
304	  performance of the system.
305
306	  See the files in /sys/kernel/tracing:
307	    available_filter_functions
308	    set_ftrace_filter
309	    set_ftrace_notrace
310
311	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
312	  otherwise has native performance as long as no tracing is active.
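
	  For example, tracing can be limited to a single function (here
	  vfs_read, only as an example) with:

	      echo vfs_read > /sys/kernel/tracing/set_ftrace_filter
	      echo function > /sys/kernel/tracing/current_tracer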
313
314config DYNAMIC_FTRACE_WITH_REGS
315	def_bool y
316	depends on DYNAMIC_FTRACE
317	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
318
319config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
320	def_bool y
321	depends on DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS
322	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
323
324config DYNAMIC_FTRACE_WITH_CALL_OPS
325	def_bool y
326	depends on HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
327
328config DYNAMIC_FTRACE_WITH_ARGS
329	def_bool y
330	depends on DYNAMIC_FTRACE
331	depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS
332
333config FUNCTION_SELF_TRACING
334	bool "Function trace tracing code"
335	depends on FUNCTION_TRACER
336	help
337	  Normally all the tracing code is set to notrace, where the function
338	  tracer will ignore all the tracing functions. Sometimes it is useful
339	  for debugging to trace some of the tracing infrastructure itself.
340	  Enable this to allow some of the tracing infrastructure to be traced
341	  by the function tracer. Note, this will likely add noise to function
342	  tracing if events and other tracing features are enabled along with
343	  function tracing.
344
345	  If unsure, say N.
346
347config FPROBE
348	bool "Kernel Function Probe (fprobe)"
349	depends on HAVE_FUNCTION_GRAPH_FREGS && HAVE_FTRACE_GRAPH_FUNC
350	depends on DYNAMIC_FTRACE_WITH_ARGS
351	select FUNCTION_GRAPH_TRACER
352	default n
353	help
354	  This option enables kernel function probe (fprobe) based on ftrace.
355	  The fprobe is similar to kprobes, but it probes only kernel function
356	  entries and exits. It can also probe multiple functions with a single
357	  fprobe.
358
359	  If unsure, say N.
360
361config FUNCTION_PROFILER
362	bool "Kernel function profiler"
363	depends on FUNCTION_TRACER
364	default n
365	help
366	  This option enables the kernel function profiler. A file is created
367	  in tracefs called function_profile_enabled which defaults to zero.
368	  When a 1 is echoed into this file, profiling begins, and when a
369	  zero is entered, profiling stops. A "functions" file is created in
370	  the trace_stat directory; this file shows the list of functions that
371	  have been hit and their counters.
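
	  For example, assuming tracefs is mounted at /sys/kernel/tracing:

	      echo 1 > /sys/kernel/tracing/function_profile_enabled
	      cat /sys/kernel/tracing/trace_stat/function*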
372
373	  If in doubt, say N.
374
375config STACK_TRACER
376	bool "Trace max stack"
377	depends on HAVE_FUNCTION_TRACER
378	select FUNCTION_TRACER
379	select STACKTRACE
380	select KALLSYMS
381	help
382	  This special tracer records the maximum stack footprint of the
383	  kernel and displays it in /sys/kernel/tracing/stack_trace.
384
385	  This tracer works by hooking into every function call that the
386	  kernel executes, and keeping a maximum stack depth value and
387	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
388	  then it will not have any overhead while the stack tracer
389	  is disabled.
390
391	  To enable the stack tracer on bootup, pass in 'stacktrace'
392	  on the kernel command line.
393
394	  The stack tracer can also be enabled or disabled via the
395	  sysctl kernel.stack_tracer_enabled.
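
	  For example:

	      echo 1 > /proc/sys/kernel/stack_tracer_enabled
	      cat /sys/kernel/tracing/stack_trace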
396
397	  Say N if unsure.
398
399config TRACE_PREEMPT_TOGGLE
400	bool
401	help
402	  Enables hooks which will be called when preemption is first disabled,
403	  and last enabled.
404
405config IRQSOFF_TRACER
406	bool "Interrupts-off Latency Tracer"
407	default n
408	depends on TRACE_IRQFLAGS_SUPPORT
409	select TRACE_IRQFLAGS
410	select GENERIC_TRACER
411	select TRACER_MAX_TRACE
412	select RING_BUFFER_ALLOW_SWAP
413	select TRACER_SNAPSHOT
414	select TRACER_SNAPSHOT_PER_CPU_SWAP
415	help
416	  This option measures the time spent in irqs-off critical
417	  sections, with microsecond accuracy.
418
419	  The default measurement method is a maximum search, which is
420	  disabled by default and can be runtime (re-)started
421	  via:
422
423	      echo 0 > /sys/kernel/tracing/tracing_max_latency
424
425	  (Note that kernel size and overhead increase with this option
426	  enabled. This option and the preempt-off timing option can be
427	  used together or separately.)
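
	  For example, assuming tracefs is mounted at /sys/kernel/tracing:

	      echo irqsoff > /sys/kernel/tracing/current_tracer
	      cat /sys/kernel/tracing/tracing_max_latency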
428
429config PREEMPT_TRACER
430	bool "Preemption-off Latency Tracer"
431	default n
432	depends on PREEMPTION
433	select GENERIC_TRACER
434	select TRACER_MAX_TRACE
435	select RING_BUFFER_ALLOW_SWAP
436	select TRACER_SNAPSHOT
437	select TRACER_SNAPSHOT_PER_CPU_SWAP
438	select TRACE_PREEMPT_TOGGLE
439	help
440	  This option measures the time spent in preemption-off critical
441	  sections, with microsecond accuracy.
442
443	  The default measurement method is a maximum search, which is
444	  disabled by default and can be runtime (re-)started
445	  via:
446
447	      echo 0 > /sys/kernel/tracing/tracing_max_latency
448
449	  (Note that kernel size and overhead increase with this option
450	  enabled. This option and the irqs-off timing option can be
451	  used together or separately.)
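
	  For example, assuming tracefs is mounted at /sys/kernel/tracing:

	      echo preemptoff > /sys/kernel/tracing/current_tracer
	      cat /sys/kernel/tracing/tracing_max_latency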
452
453config SCHED_TRACER
454	bool "Scheduling Latency Tracer"
455	select GENERIC_TRACER
456	select CONTEXT_SWITCH_TRACER
457	select TRACER_MAX_TRACE
458	select TRACER_SNAPSHOT
459	help
460	  This tracer tracks the latency of the highest priority task
461	  to be scheduled in, starting from the point it has woken up.
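
	  For example, the wakeup and wakeup_rt tracers can be selected with:

	      echo wakeup_rt > /sys/kernel/tracing/current_tracer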
462
463config HWLAT_TRACER
464	bool "Tracer to detect hardware latencies (like SMIs)"
465	select GENERIC_TRACER
466	select TRACER_MAX_TRACE
467	help
468	 This tracer, when enabled, will create one or more kernel threads,
469	 depending on what the cpumask file is set to, with each thread
470	 spinning in a loop looking for interruptions caused by
471	 something other than the kernel. For example, if a
472	 System Management Interrupt (SMI) takes a noticeable amount of
473	 time, this tracer will detect it. This is useful for testing
474	 if a system is reliable for Real Time tasks.
475
476	 Some files are created in the tracing directory when this
477	 is enabled:
478
479	   hwlat_detector/width   - time in usecs for how long to spin for
480	   hwlat_detector/window  - time in usecs between the start of each
481				     iteration
482
483	 A kernel thread is created that will spin with interrupts disabled
484	 for "width" microseconds in every "window" cycle. It will not spin
485	 for the remaining "window - width" microseconds, during which the
486	 system can continue to operate.
487
488	 The output will appear in the trace and trace_pipe files.
489
490	 When the tracer is not running, it has no effect on the system,
491	 but when it is running, it can cause the system to be
492	 periodically non-responsive. Do not run this tracer on a
493	 production system.
494
495	 To enable this tracer, echo "hwlat" into the current_tracer
496	 file. Every time a latency is greater than tracing_thresh, it will
497	 be recorded into the ring buffer.
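
	 For example (the width and window values below are only illustrative):

	     echo hwlat > /sys/kernel/tracing/current_tracer
	     echo 500000 > /sys/kernel/tracing/hwlat_detector/window
	     echo 250000 > /sys/kernel/tracing/hwlat_detector/width
	     cat /sys/kernel/tracing/trace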
498
499config OSNOISE_TRACER
500	bool "OS Noise tracer"
501	select GENERIC_TRACER
502	select TRACER_MAX_TRACE
503	help
504	  In the context of high-performance computing (HPC), the Operating
505	  System Noise (osnoise) refers to the interference experienced by an
506	  application due to activities inside the operating system. In the
507	  context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
508	  can cause noise to the system. Moreover, hardware-related jobs can
509	  also cause noise, for example, via SMIs.
510
511	  The osnoise tracer leverages the hwlat_detector by running a similar
512	  loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
513	  the sources of osnoise to run during its execution. The osnoise tracer
514	  takes note of the entry and exit points of each source of interference,
515	  increasing a per-cpu interference counter. It saves an interference
516	  counter for each source of interference. The interference counter for
517	  NMIs, IRQs, SoftIRQs, and threads is increased any time the tool
518	  observes one of these interferences' entry events. When noise happens
519	  without any interference from the operating system level, the
520	  hardware noise counter increases, pointing to a hardware-related
521	  noise. In this way, osnoise can account for any source of
522	  interference. At the end of the period, the osnoise tracer prints
523	  the sum of all noise, the max single noise, the percentage of CPU
524	  available for the thread, and the counters for the noise sources.
525
526	  In addition to the tracer, a set of tracepoints was added to
527	  facilitate the identification of the osnoise source.
528
529	  The output will appear in the trace and trace_pipe files.
530
531	  To enable this tracer, echo "osnoise" into the current_tracer
532	  file.
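
	  For example:

	      echo osnoise > /sys/kernel/tracing/current_tracer
	      cat /sys/kernel/tracing/trace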
533
534config TIMERLAT_TRACER
535	bool "Timerlat tracer"
536	select OSNOISE_TRACER
537	select GENERIC_TRACER
538	help
539	  The timerlat tracer aims to help preemptive kernel developers
540	  find sources of wakeup latencies of real-time threads.
541
542	  The tracer creates a per-cpu kernel thread with real-time priority.
543	  The tracer thread sets a periodic timer to wake itself up, and goes
544	  to sleep waiting for the timer to fire. At the wakeup, the thread
545	  then computes a wakeup latency value as the difference between
546	  the current time and the absolute time that the timer was set
547	  to expire.
548
549	  The tracer prints two lines at every activation. The first is the
550	  timer latency observed at the hardirq context before the
551	  activation of the thread. The second is the timer latency observed
552	  by the thread, which is the same level that cyclictest reports. The
553	  ACTIVATION ID field serves to relate the irq execution to its
554	  respective thread execution.
555
556	  The tracer is built on top of the osnoise tracer, and the osnoise:
557	  events can be used to trace the source of interference from NMI,
558	  IRQs and other threads. It also enables the capture of the
559	  stacktrace at the IRQ context, which helps to identify the code
560	  path that can cause thread delay.
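
	  For example:

	      echo timerlat > /sys/kernel/tracing/current_tracer
	      cat /sys/kernel/tracing/trace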
561
562config MMIOTRACE
563	bool "Memory mapped IO tracing"
564	depends on HAVE_MMIOTRACE_SUPPORT && PCI
565	select GENERIC_TRACER
566	help
567	  Mmiotrace traces Memory Mapped I/O access and is meant for
568	  debugging and reverse engineering. It is called from the ioremap
569	  implementation and works via page faults. Tracing is disabled by
570	  default and can be enabled at run-time.
571
572	  See Documentation/trace/mmiotrace.rst.
573	  If you are not helping to develop drivers, say N.
574
575config ENABLE_DEFAULT_TRACERS
576	bool "Trace process context switches and events"
577	depends on !GENERIC_TRACER
578	select TRACING
579	help
580	  This tracer hooks to various trace points in the kernel,
581	  allowing the user to pick and choose which trace point they
582	  want to trace. It also includes the sched_switch tracer plugin.
583
584config FTRACE_SYSCALLS
585	bool "Trace syscalls"
586	depends on HAVE_SYSCALL_TRACEPOINTS
587	select GENERIC_TRACER
588	select KALLSYMS
589	help
590	  Basic tracer to catch the syscall entry and exit events.
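
	  For example, all syscall events can be enabled with:

	      echo 1 > /sys/kernel/tracing/events/syscalls/enable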
591
592config TRACE_SYSCALL_BUF_SIZE_DEFAULT
593	int "System call user read max size"
594	range 0 165
595	default 63
596	depends on FTRACE_SYSCALLS
597	help
598	 Some system call trace events will record the data from a user
599	 space address that one of the parameters points to. The amount of
600	 data per event is limited. That limit is set by this config and
601	 this config also affects how much user space data perf can read.
602
603	 For a tracing instance, this size may be changed by writing into
604	 its syscall_user_buf_size file.
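
	 For example (using a hypothetical instance named "foo"):

	     mkdir /sys/kernel/tracing/instances/foo
	     echo 128 > /sys/kernel/tracing/instances/foo/syscall_user_buf_size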
605
606config TRACER_SNAPSHOT
607	bool "Create a snapshot trace buffer"
608	select TRACER_MAX_TRACE
609	help
610	  Allow tracing users to take snapshot of the current buffer using the
611	  ftrace interface, e.g.:
612
613	      echo 1 > /sys/kernel/tracing/snapshot
614	      cat snapshot
615
616config TRACER_SNAPSHOT_PER_CPU_SWAP
617	bool "Allow snapshot to swap per CPU"
618	depends on TRACER_SNAPSHOT
619	select RING_BUFFER_ALLOW_SWAP
620	help
621	  Allow doing a snapshot of a single CPU buffer instead of a
622	  full swap (all buffers). If this is set, then the following is
623	  allowed:
624
625	      echo 1 > /sys/kernel/tracing/per_cpu/cpu2/snapshot
626
627	  After which, only the tracing buffer for CPU 2 is swapped with
628	  the main tracing buffer, and the other CPU buffers remain the same.
629
630	  When this is enabled, this adds a little more overhead to the
631	  trace recording, as it needs to add some checks to synchronize
632	  recording with swaps. But this does not affect the performance
633	  of the overall system. This is enabled by default when the preempt
634	  or irq latency tracers are enabled, as those need to swap as well
635	  and already add the overhead (plus a lot more).
636
637config TRACE_BRANCH_PROFILING
638	bool
639	select GENERIC_TRACER
640
641choice
642	prompt "Branch Profiling"
643	default BRANCH_PROFILE_NONE
644	help
645	 Branch profiling is a software profiler. It will add hooks
646	 into the C conditionals to test which path a branch takes.
647
648	 The likely/unlikely profiler only looks at the conditions that
649	 are annotated with a likely or unlikely macro.
650
651	 The "all branch" profiler will profile every if-statement in the
652	 kernel. This profiler will also enable the likely/unlikely
653	 profiler.
654
655	 Either of the above profilers adds a bit of overhead to the system.
656	 If unsure, choose "No branch profiling".
657
658config BRANCH_PROFILE_NONE
659	bool "No branch profiling"
660	help
661	  No branch profiling. Branch profiling adds a bit of overhead.
662	  Only enable it if you want to analyse the branching behavior.
663	  Otherwise keep it disabled.
664
665config PROFILE_ANNOTATED_BRANCHES
666	bool "Trace likely/unlikely profiler"
667	select TRACE_BRANCH_PROFILING
668	help
669	  This tracer profiles all likely and unlikely macros
670	  in the kernel. It will display the results in:
671
672	  /sys/kernel/tracing/trace_stat/branch_annotated
673
674	  Note: this will add a significant overhead; only turn this
675	  on if you need to profile the system's use of these macros.
676
677config PROFILE_ALL_BRANCHES
678	bool "Profile all if conditionals" if !FORTIFY_SOURCE
679	select TRACE_BRANCH_PROFILING
680	help
681	  This tracer profiles all branch conditions. Every if ()
682	  in the kernel is recorded, whether it hit or missed.
683	  The results will be displayed in:
684
685	  /sys/kernel/tracing/trace_stat/branch_all
686
687	  This option also enables the likely/unlikely profiler.
688
689	  This configuration, when enabled, will impose a great overhead
690	  on the system. This should only be enabled when the system
691	  is to be analyzed in much detail.
692endchoice
693
694config TRACING_BRANCHES
695	bool
696	help
697	  Selected by tracers that will trace the likely and unlikely
698	  conditions. This prevents the tracers themselves from being
699	  profiled. Profiling the tracing infrastructure can only happen
700	  when the likelys and unlikelys are not being traced.
701
702config BRANCH_TRACER
703	bool "Trace likely/unlikely instances"
704	depends on TRACE_BRANCH_PROFILING
705	select TRACING_BRANCHES
706	help
707	  This traces the events of likely and unlikely condition
708	  calls in the kernel.  The difference between this and the
709	  "Trace likely/unlikely profiler" is that this is not a
710	  histogram of the callers, but actually places the calling
711	  events into a running trace buffer to see when and where the
712	  events happened, as well as their results.
713
714	  Say N if unsure.
715
716config BLK_DEV_IO_TRACE
717	bool "Support for tracing block IO actions"
718	depends on SYSFS
719	depends on BLOCK
720	select RELAY
721	select DEBUG_FS
722	select TRACEPOINTS
723	select GENERIC_TRACER
724	select STACKTRACE
725	help
726	  Say Y here if you want to be able to trace the block layer actions
727	  on a given queue. Tracing allows you to see any traffic happening
728	  on a block device queue. For more information (and the userspace
729	  support tools needed), fetch the blktrace tools from:
730
731	  git://git.kernel.dk/blktrace.git
732
733	  Tracing also is possible using the ftrace interface, e.g.:
734
735	    echo 1 > /sys/block/sda/sda1/trace/enable
736	    echo blk > /sys/kernel/tracing/current_tracer
737	    cat /sys/kernel/tracing/trace_pipe
738
739	  If unsure, say N.
740
741config FPROBE_EVENTS
742	depends on FPROBE
743	depends on HAVE_REGS_AND_STACK_ACCESS_API
744	bool "Enable fprobe-based dynamic events"
745	select TRACING
746	select PROBE_EVENTS
747	select DYNAMIC_EVENTS
748	default y
749	help
750	  This allows the user to add tracing events on function entry and
751	  exit via the ftrace interface. The syntax is the same as for kprobe
752	  events, and kprobe events on function entry and exit will be
753	  transparently converted to fprobe events.
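
	  For example ("myprobe" and "myretprobe" are arbitrary names, and
	  vfs_read is only an example target):

	      echo 'f:myprobe vfs_read' >> /sys/kernel/tracing/dynamic_events
	      echo 'f:myretprobe vfs_read%return' >> /sys/kernel/tracing/dynamic_events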
754
755config PROBE_EVENTS_BTF_ARGS
756	depends on HAVE_FUNCTION_ARG_ACCESS_API
757	depends on FPROBE_EVENTS || KPROBE_EVENTS
758	depends on DEBUG_INFO_BTF && BPF_SYSCALL
759	bool "Support BTF function arguments for probe events"
760	default y
761	help
762	  The user can specify the arguments of the probe event using the names
763	  of the arguments of the probed function, when the probe location is a
764	  kernel function entry or a tracepoint.
765	  This is available only if BTF (BPF Type Format) support is enabled.
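
	  For example, a sketch of fetching all arguments by their BTF names
	  ("myread" is an arbitrary name):

	      echo 'f:myread vfs_read $arg*' >> /sys/kernel/tracing/dynamic_events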
766
767config KPROBE_EVENTS
768	depends on KPROBES
769	depends on HAVE_REGS_AND_STACK_ACCESS_API
770	bool "Enable kprobes-based dynamic events"
771	select TRACING
772	select PROBE_EVENTS
773	select DYNAMIC_EVENTS
774	default y
775	help
776	  This allows the user to add tracing events (similar to tracepoints)
777	  on the fly via the ftrace interface. See
778	  Documentation/trace/kprobetrace.rst for more details.
779
780	  Those events can be inserted wherever kprobes can probe, and record
781	  various register and memory values.
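
	  For example ("myprobe" is an arbitrary name and vfs_read is only an
	  example target):

	      echo 'p:myprobe vfs_read' >> /sys/kernel/tracing/kprobe_events
	      echo 1 > /sys/kernel/tracing/events/kprobes/myprobe/enable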
782
783	  This option is also required by perf-probe subcommand of perf tools.
784	  If you want to use perf tools, this option is strongly recommended.
785
786config KPROBE_EVENTS_ON_NOTRACE
787	bool "Do NOT protect notrace function from kprobe events"
788	depends on KPROBE_EVENTS
789	depends on DYNAMIC_FTRACE
790	default n
791	help
792	  This is only for the developers who want to debug ftrace itself
793	  using kprobe events.
794
795	  If kprobes can use ftrace instead of breakpoints, ftrace-related
796	  functions are protected from kprobe events to prevent an infinite
797	  recursion or any unexpected execution path which leads to a kernel
798	  crash.
799
800	  This option disables such protection and allows you to put kprobe
801	  events on ftrace functions for debugging ftrace by itself.
802	  Note that this might let you shoot yourself in the foot.
803
804	  If unsure, say N.
805
806config UPROBE_EVENTS
807	bool "Enable uprobes-based dynamic events"
808	depends on ARCH_SUPPORTS_UPROBES
809	depends on MMU
810	depends on PERF_EVENTS
811	select UPROBES
812	select PROBE_EVENTS
813	select DYNAMIC_EVENTS
814	select TRACING
815	default y
816	help
817	  This allows the user to add tracing events on top of userspace
818	  dynamic events (similar to tracepoints) on the fly via the trace
819	  events interface. Those events can be inserted wherever uprobes
820	  can probe, and record various registers.
821	  This option is required if you plan to use perf-probe subcommand
822	  of perf tools on user space applications.
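
	  For example ("myprobe" is an arbitrary name and the path/offset are
	  only illustrative):

	      echo 'p:myprobe /bin/bash:0x4245c0' >> /sys/kernel/tracing/uprobe_events
	      echo 1 > /sys/kernel/tracing/events/uprobes/myprobe/enable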
823
824config EPROBE_EVENTS
825	bool "Enable event-based dynamic events"
826	depends on TRACING
827	depends on HAVE_REGS_AND_STACK_ACCESS_API
828	select PROBE_EVENTS
829	select DYNAMIC_EVENTS
830	default y
831	help
832	  Eprobes are dynamic events that can be placed on other existing
833	  events. They can be used to limit which fields are recorded in
834	  an event or even dereference a field of an event. They can also
835	  convert the type of an event field; for example, turning an
836	  address into a string.
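
	  For example, a sketch that attaches an eprobe to an existing syscall
	  event and records one of its fields ("myeprobe" is an arbitrary name):

	      echo 'e:myeprobe syscalls.sys_enter_openat dfd=$dfd' >> /sys/kernel/tracing/dynamic_events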
837
838config BPF_EVENTS
839	depends on BPF_SYSCALL
840	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
841	bool
842	default y
843	help
844	  This allows the user to attach BPF programs to kprobe, uprobe, and
845	  tracepoint events.
846
847config DYNAMIC_EVENTS
848	def_bool n
849
850config PROBE_EVENTS
851	def_bool n
852
853config BPF_KPROBE_OVERRIDE
854	bool "Enable BPF programs to override a kprobed function"
855	depends on BPF_EVENTS
856	depends on FUNCTION_ERROR_INJECTION
857	default n
858	help
859	 Allows BPF to override the execution of a probed function and
860	 set a different return value.  This is used for error injection.
861
862config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
863	bool
864	depends on DYNAMIC_FTRACE
865
866config FTRACE_MCOUNT_USE_CC
867	def_bool y
868	depends on $(cc-option,-mrecord-mcount)
869	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
870	depends on DYNAMIC_FTRACE
871
872config FTRACE_MCOUNT_USE_OBJTOOL
873	def_bool y
874	depends on HAVE_OBJTOOL_MCOUNT
875	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
876	depends on !FTRACE_MCOUNT_USE_CC
877	depends on DYNAMIC_FTRACE
878	select OBJTOOL
879
880config FTRACE_MCOUNT_USE_RECORDMCOUNT
881	def_bool y
882	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
883	depends on !FTRACE_MCOUNT_USE_CC
884	depends on !FTRACE_MCOUNT_USE_OBJTOOL
885	depends on DYNAMIC_FTRACE
886
887config TRACING_MAP
888	bool
889	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
890	help
891	  tracing_map is a special-purpose lock-free map for tracing,
892	  separated out as a stand-alone facility in order to allow it
893	  to be shared between multiple tracers.  It isn't meant to be
894	  generally used outside of that context, and is normally
895	  selected by tracers that use it.
896
897config SYNTH_EVENTS
898	bool "Synthetic trace events"
899	select TRACING
900	select DYNAMIC_EVENTS
901	default n
902	help
903	  Synthetic events are user-defined trace events that can be
904	  used to combine data from other trace events or in fact any
905	  data source.  Synthetic events can be generated indirectly
906	  via the trace() action of histogram triggers or directly
907	  by way of an in-kernel API.
908
909	  See Documentation/trace/events.rst or
910	  Documentation/trace/histogram.rst for details and examples.
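
	  For example, a synthetic event with three fields can be defined with:

	      echo 'wakeup_latency u64 lat; pid_t pid; int prio' >> /sys/kernel/tracing/synthetic_events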
911
912	  If in doubt, say N.
913
914config USER_EVENTS
915	bool "User trace events"
916	select TRACING
917	select DYNAMIC_EVENTS
918	help
919	  User trace events are user-defined trace events that
920	  can be used like an existing kernel trace event.  User trace
921	  events are generated by writing to a tracefs file.  User
922	  processes can determine if their tracing events should be
923	  generated by registering a value and bit with the kernel
924	  that reflects when it is enabled or not.
925
926	  See Documentation/trace/user_events.rst.
927	  If in doubt, say N.
928
929config HIST_TRIGGERS
930	bool "Histogram triggers"
931	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
932	select TRACING_MAP
933	select TRACING
934	select DYNAMIC_EVENTS
935	select SYNTH_EVENTS
936	default n
937	help
938	  Hist triggers allow one or more arbitrary trace event fields
939	  to be aggregated into hash tables and dumped to stdout by
940	  reading a debugfs/tracefs file.  They're useful for
941	  gathering quick and dirty (though precise) summaries of
942	  event activity as an initial guide for further investigation
943	  using more advanced tools.
944
945	  Inter-event tracing of quantities such as latencies is also
946	  supported using hist triggers under this option.
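
	  For example, a histogram of kmalloc sizes keyed by call site can be
	  created with:

	      echo 'hist:keys=call_site:vals=bytes_req' > /sys/kernel/tracing/events/kmem/kmalloc/trigger
	      cat /sys/kernel/tracing/events/kmem/kmalloc/hist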
947
948	  See Documentation/trace/histogram.rst.
949	  If in doubt, say N.
950
951config TRACE_EVENT_INJECT
952	bool "Trace event injection"
953	depends on TRACING
954	help
955	  Allow user-space to inject a specific trace event into the ring
956	  buffer. This is mainly used for testing purposes.
957
958	  If unsure, say N.
959
960config TRACEPOINT_BENCHMARK
961	bool "Add tracepoint that benchmarks tracepoints"
962	help
963	 This option creates the tracepoint "benchmark:benchmark_event".
964	 When the tracepoint is enabled, it kicks off a kernel thread that
965	 goes into an infinite loop (calling cond_resched() to let other tasks
966	 run), and calls the tracepoint. Each iteration will record the time
967	 it took to write to the tracepoint, and in the next iteration that
968	 data will be passed to the tracepoint itself. That is, the tracepoint
969	 will report the time it took to do the previous tracepoint.
970	 The string written to the tracepoint is a static string of 128 bytes
971	 to keep the time the same. The initial string is simply a write of
972	 "START". The second string records the cold cache time of the first
973	 write which is not added to the rest of the calculations.
974
975	 As it is a tight loop, it benchmarks as hot cache. That's fine because
976	 we care most about hot paths that are probably in cache already.
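
	 To run the benchmark, enable the tracepoint, e.g.:

	     echo 1 > /sys/kernel/tracing/events/benchmark/benchmark_event/enable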
977
978	 An example of the output:
979
980	      START
981	      first=3672 [COLD CACHED]
982	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
983	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
984	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
985	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
986	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
987	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666
988
989
990config RING_BUFFER_BENCHMARK
991	tristate "Ring buffer benchmark stress tester"
992	depends on RING_BUFFER
993	help
994	  This option creates a test to stress the ring buffer and benchmark it.
995	  It creates its own ring buffer such that it will not interfere with
996	  any other users of the ring buffer (such as ftrace). It then creates
997	  a producer and consumer that will run for 10 seconds and sleep for
998	  10 seconds. Each interval it will print out the number of events
999	  it recorded and give a rough estimate of how long each iteration took.
1000
1001	  It does not disable interrupts or raise its priority, so it may be
1002	  affected by processes that are running.
1003
1004	  If unsure, say N.
1005
1006config TRACE_EVAL_MAP_FILE
1007	bool "Show eval mappings for trace events"
1008	depends on TRACING
1009	help
1010	The "print fmt" of the trace events will show the enum/sizeof names
1011	instead of their values. This can cause problems for user space tools
1012	that use this string to parse the raw data as user space does not know
1013	how to convert the string to its value.
1014
1015	To fix this, there's a special macro in the kernel that can be used
1016	to convert an enum/sizeof into its value. If this macro is used, then
1017	the print fmt strings will be converted to their values.
1018
1019	If something does not get converted properly, this option can be
1020	used to show what enums/sizeof the kernel tried to convert.
1021
1022	This option is for debugging the conversions. A file is created
1023	in the tracing directory called "eval_map" that will show the
1024	names matched with their values and what trace event system they
1025	belong to.
1026
1027	Normally, the mapping of the strings to values will be freed after
1028	boot up or module load. With this option, they will not be freed, as
1029	they are needed for the "eval_map" file. Enabling this option will
1030	increase the memory footprint of the running kernel.
1031
1032	If unsure, say N.
1033
1034config FTRACE_RECORD_RECURSION
1035	bool "Record functions that recurse in function tracing"
1036	depends on FUNCTION_TRACER
1037	help
1038	  All callbacks that attach to the function tracing have some sort
1039	  of protection against recursion. Even though the protection exists,
1040	  it adds overhead. This option will create a file in the tracefs
1041	  file system called "recursed_functions" that will list the functions
1042	  that triggered a recursion.
1043
1044	  This will add more overhead to cases that have recursion.
1045
1046	  If unsure, say N
1047
1048config FTRACE_RECORD_RECURSION_SIZE
1049	int "Max number of recursed functions to record"
1050	default 128
1051	depends on FTRACE_RECORD_RECURSION
1052	help
1053	  This defines the limit of number of functions that can be
1054	  listed in the "recursed_functions" file, that lists all
1055	  the functions that caused a recursion to happen.
1056	  This file can be reset, but the limit can not change in
1057	  size at runtime.
1058
1059config FTRACE_VALIDATE_RCU_IS_WATCHING
1060	bool "Validate RCU is on during ftrace execution"
1061	depends on FUNCTION_TRACER
1062	depends on ARCH_WANTS_NO_INSTR
1063	help
1064	  All callbacks that attach to the function tracing have some sort of
1065	  protection against recursion. This option is only to verify that
1066	  ftrace (and other users of ftrace_test_recursion_trylock()) are not
1067	  called outside of RCU, as if they are, it can cause a race. But it
1068	  also has a noticeable overhead when enabled.
1069
1070	  If unsure, say N
1071
1072config RING_BUFFER_RECORD_RECURSION
1073	bool "Record functions that recurse in the ring buffer"
1074	depends on FTRACE_RECORD_RECURSION
1075	# default y, because it is coupled with FTRACE_RECORD_RECURSION
1076	default y
1077	help
1078	  The ring buffer has its own internal recursion. When recursion
1079	  happens it won't cause harm because of the protection, but it
1080	  does cause unwanted overhead. Enabling this option will place
1081	  the locations where recursion was detected into the ftrace
1082	  "recursed_functions" file.
1083
1084	  This will add more overhead to cases that have recursion.
1085
1086config GCOV_PROFILE_FTRACE
1087	bool "Enable GCOV profiling on ftrace subsystem"
1088	depends on GCOV_KERNEL
1089	help
1090	  Enable GCOV profiling on the ftrace subsystem for checking
1091	  which functions/lines are tested.
1092
1093	  If unsure, say N.
1094
1095	  Note that on a kernel compiled with this config, ftrace will
1096	  run significantly slower.
1097
1098config FTRACE_SELFTEST
1099	bool
1100
1101config FTRACE_STARTUP_TEST
1102	bool "Perform a startup test on ftrace"
1103	depends on GENERIC_TRACER
1104	select FTRACE_SELFTEST
1105	help
1106	  This option performs a series of startup tests on ftrace. On bootup
1107	  a series of tests are made to verify that the tracer is
1108	  functioning properly. It will do tests on all the configured
1109	  tracers of ftrace.
1110
1111config EVENT_TRACE_STARTUP_TEST
1112	bool "Run selftest on trace events"
1113	depends on FTRACE_STARTUP_TEST
1114	default y
1115	help
1116	  This option performs a test on all trace events in the system.
1117	  It basically just enables each event and runs some code that
1118	  will trigger events (not necessarily the event it enables).
1119	  This may take some time to run as there are a lot of events.
1120
1121config EVENT_TRACE_TEST_SYSCALLS
1122	bool "Run selftest on syscall events"
1123	depends on EVENT_TRACE_STARTUP_TEST
1124	help
1125	 This option will also enable testing every syscall event.
1126	 It enables each event, runs various loads with the event enabled,
1127	 and then disables it. This adds a bit more time for kernel boot
1128	 up since it runs this on every system call defined.
1129
1130	 TBD - enable a way to actually call the syscalls as we test their
1131	       events
1132
1133config FTRACE_SORT_STARTUP_TEST
1134	bool "Verify compile time sorting of ftrace functions"
1135	depends on DYNAMIC_FTRACE
1136	depends on BUILDTIME_MCOUNT_SORT
1137	help
1138	 Sorting of the mcount_loc section, which is used to find where
1139	 ftrace needs to patch functions for tracing and other callbacks,
1140	 is done at compile time. But if the sort is not done correctly,
1141	 it will cause non-deterministic failures. When this is set, the
1142	 sorted sections will be verified to be indeed sorted, and a
1143	 warning will be issued if they are not.
1144
1145	 If unsure, say N
1146
1147config RING_BUFFER_STARTUP_TEST
1148	bool "Ring buffer startup self test"
1149	depends on RING_BUFFER
1150	help
1151	 Run a simple self test on the ring buffer on boot up. Late in the
1152	 kernel boot sequence, the test will start, kicking off
1153	 a thread per cpu. Each thread will write various size events
1154	 into the ring buffer. Another thread is created to send IPIs
1155	 to each of the threads, where the IPI handler will also write
1156	 to the ring buffer, to test/stress the nesting ability.
1157	 If any anomalies are discovered, a warning will be displayed
1158	 and all ring buffers will be disabled.
1159
1160	 The test runs for 10 seconds. This will slow your boot time
1161	 by at least 10 more seconds.
1162
1163	 At the end of the test, statistics and more checks are done.
1164	 It will output the stats of each per cpu buffer: What
1165	 was written, the sizes, what was read, what was lost, and
1166	 other similar details.
1167
1168	 If unsure, say N
1169
1170config RING_BUFFER_VALIDATE_TIME_DELTAS
1171	bool "Verify ring buffer time stamp deltas"
1172	depends on RING_BUFFER
1173	help
1174	  This will audit the time stamps on the ring buffer sub
1175	  buffer to make sure that all the time deltas for the
1176	  events on a sub buffer match the current time stamp.
1177	  This audit is performed for every event that is not
1178	  interrupted, or interrupting another event. A check
1179	  is also made when traversing sub buffers to make sure
1180	  that all the deltas on the previous sub buffer do not
1181	  add up to be greater than the current time stamp.
1182
1183	  NOTE: This adds significant overhead to recording of events,
1184	  and should only be used to test the logic of the ring buffer.
1185	  Do not use it on production systems.
1186
1187	  Only say Y if you understand what this does, and you
1188	  still want it enabled. Otherwise say N
1189
1190config MMIOTRACE_TEST
1191	tristate "Test module for mmiotrace"
1192	depends on MMIOTRACE && m
1193	help
1194	  This is a dumb module for testing mmiotrace. It is very dangerous
1195	  as it will write garbage to IO memory starting at a given address.
1196	  However, it should be safe to use on e.g. unused portion of VRAM.
1197
1198	  Say N, unless you absolutely know what you are doing.
1199
1200config PREEMPTIRQ_DELAY_TEST
1201	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
1202	depends on m
1203	help
1204	  Select this option to build a test module that can help test latency
1205	  tracers by executing a preempt or irq disable section with a user
1206	  configurable delay. The module busy waits for the duration of the
1207	  critical section.
1208
1209	  For example, the following invocation generates a burst of three
1210	  irq-disabled critical sections for 500us:
1211	  modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3
1212
1213	  In addition, if you want to pin the test to the cpu on which the
1214	  latency tracer is running, specify cpu_affinity=cpu_num at the end
1215	  of the command.
1216
1217	  If unsure, say N
1218
1219config SYNTH_EVENT_GEN_TEST
1220	tristate "Test module for in-kernel synthetic event generation"
1221	depends on SYNTH_EVENTS && m
1222	help
1223	  This option creates a test module to check the base
1224	  functionality of in-kernel synthetic event definition and
1225	  generation.
1226
1227	  To test, insert the module, and then check the trace buffer
1228	  for the generated sample events.
1229
1230	  If unsure, say N.
1231
1232config KPROBE_EVENT_GEN_TEST
1233	tristate "Test module for in-kernel kprobe event generation"
1234	depends on KPROBE_EVENTS && m
1235	help
1236	  This option creates a test module to check the base
1237	  functionality of in-kernel kprobe event definition.
1238
1239	  To test, insert the module, and then check the trace buffer
1240	  for the generated kprobe events.
1241
1242	  If unsure, say N.
1243
1244config HIST_TRIGGERS_DEBUG
1245	bool "Hist trigger debug support"
1246	depends on HIST_TRIGGERS
1247	help
1248          Add "hist_debug" file for each event, which when read will
1249          dump out a bunch of internal details about the hist triggers
1250          defined on that event.
1251
1252          The hist_debug file serves a couple of purposes:
1253
1254            - Helps developers verify that nothing is broken.
1255
1256            - Provides educational information to support the details
1257              of the hist trigger internals as described by
1258              Documentation/trace/histogram-design.rst.
1259
1260          The hist_debug output only covers the data structures
1261          related to the histogram definitions themselves and doesn't
1262          display the internals of map buckets or variable values of
1263          running histograms.
1264
1265          If unsure, say N.
1266
1267source "kernel/trace/rv/Kconfig"
1268
1269endif # FTRACE
1270