#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_GRAPH_FP_TEST
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_TRACE_MCOUNT_TEST
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_HW_BRANCH_TRACER
	bool

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config TRACER_MAX_TRACE
	bool

config RING_BUFFER
	bool
config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

# All tracer options should select GENERIC_TRACER. Options that are enabled
# by all tracers (the context switch and event tracers) select TRACING instead.
# This allows those options to appear when no other tracer is selected, while
# hiding them when something else selects them. The two separate options,
# GENERIC_TRACER and TRACING, are needed to accomplish this hiding of the
# automatic options without creating circular dependencies.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	# PPC32 has no irqflags tracing support, but it can use most of the
	# tracers anyway; they were tested to build and work. Note that new
	# exceptions to this list are not welcome; better to implement
	# irqflags tracing for your architecture.
	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select FRAME_POINTER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function; that NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If tracing is disabled at
	  runtime (the bootup default), the overhead of the instructions is
	  very small and not measurable even in micro-benchmarks.
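
	  For example, assuming debugfs is mounted at /sys/kernel/debug, the
	  function tracer can typically be enabled at runtime with:

	    echo function > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace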

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry
	  and its return.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread, with some information such
	  as the return value. This is done by saving the function's
	  return address on a stack of calls in the current task structure.
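
	  For example, assuming debugfs is mounted at /sys/kernel/debug, the
	  graph tracer can typically be selected at runtime with:

	    echo function_graph > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace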


config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on GENERIC_TIME
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at runtime
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)
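
	  For example, assuming debugfs is mounted at /sys/kernel/debug, a
	  typical session looks like:

	    echo irqsoff > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/tracing_max_latency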

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on GENERIC_TIME
	depends on PREEMPT
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at runtime
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)
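
	  For example, assuming debugfs is mounted at /sys/kernel/debug:

	    echo preemptoff > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/tracing_max_latency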

config SYSPROF_TRACER
	bool "Sysprof Tracer"
	depends on X86
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  This tracer provides the trace needed by the 'Sysprof' userspace
	  tool.

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.
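
	  For example, assuming debugfs is mounted at /sys/kernel/debug, the
	  wakeup latency tracer can typically be selected with:

	    echo wakeup > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/tracing_max_latency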

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks into various trace points in the kernel,
	  allowing the user to pick and choose which trace points they
	  want to trace. It also includes the sched_switch tracer plugin.
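
	  For example, assuming debugfs is mounted at /sys/kernel/debug,
	  individual events can be enabled with:

	    echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
	    cat /sys/kernel/debug/tracing/trace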

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.
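
	  For example, assuming debugfs is mounted at /sys/kernel/debug, all
	  syscall events can typically be enabled at once with:

	    echo 1 > /sys/kernel/debug/tracing/events/syscalls/enable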

config BOOT_TRACER
	bool "Trace boot initcalls"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  This tracer helps developers to optimize boot times: it records
	  the timings of the initcalls and traces key events and the identity
	  of tasks that can cause boot delays, such as context switches.

	  Its aim is to be parsed by the scripts/bootgraph.pl tool to
	  produce pretty graphics about boot inefficiencies, giving a visual
	  representation of the delays during initcalls - but the raw
	  /sys/kernel/debug/tracing/trace text output is readable too.

	  You must pass initcall_debug and ftrace=initcall on the kernel
	  command line to enable this on bootup.

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  Branch profiling is a software profiler. It adds hooks into
	  the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all the likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/debug/tracing/profile_annotated_branch

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  executed in the kernel is recorded, along with whether it
	  was a hit or a miss.
	  The results will be displayed in:

	  /sys/kernel/debug/tracing/profile_branch

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likely and unlikely conditions are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel.  The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config KSYM_TRACER
	bool "Trace read and write access on kernel memory locations"
	depends on HAVE_HW_BREAKPOINT
	select TRACING
	help
	  This tracer helps find read and write operations on any given
	  kernel symbol, i.e. any symbol listed in /proc/kallsyms.

config PROFILE_KSYM_TRACER
	bool "Profile all kernel memory accesses on 'watched' variables"
	depends on KSYM_TRACER
	help
	  This tracer profiles kernel accesses on variables watched through the
	  ksym tracer ftrace plugin. Depending upon the hardware, all read
	  and write operations on kernel variables can be monitored for
	  accesses.

	  The results will be displayed in:
	  /sys/kernel/debug/tracing/profile_ksym

	  Say N if unsure.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled.
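
	  For example, assuming debugfs is mounted at /sys/kernel/debug:

	    echo 1 > /proc/sys/kernel/stack_tracer_enabled
	    cat /sys/kernel/debug/tracing/stack_trace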

	  Say N if unsure.

config HW_BRANCH_TRACER
	depends on HAVE_HW_BRANCH_TRACER
	bool "Trace hw branches"
	select GENERIC_TRACER
	help
	  This tracer records all branches on the system in a circular
	  buffer, giving access to the last N branches for each cpu.

config KMEMTRACE
	bool "Trace SLAB allocations"
	select GENERIC_TRACER
	help
	  kmemtrace provides tracing for slab allocator functions, such as
	  kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected
	  data is then fed to the userspace application in order to analyse
	  allocation hotspots, internal fragmentation and so on, making it
	  possible to see how well an allocator performs, as well as debug
	  and profile kernel code.

	  This requires a userspace application to use. See
	  Documentation/trace/kmemtrace.txt for more information.

	  Saying Y will make the kernel somewhat larger and slower. However,
	  if you disable kmemtrace at run-time or boot-time, the performance
	  impact is minimal (depending on the arch the kernel is built for).

	  If unsure, say N.

config WORKQUEUE_TRACER
	bool "Trace workqueues"
	select GENERIC_TRACER
	help
	  The workqueue tracer provides some statistical information
	  about each cpu workqueue thread, such as the number of works
	  inserted and executed since its creation. It can help to
	  evaluate the amount of work each of them has to perform. For
	  example, it can help a developer decide whether to use a
	  per-cpu workqueue instead of a singlethreaded one.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENT
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.txt for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.
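
	  For example, assuming debugfs is mounted at /sys/kernel/debug, a
	  probe event can typically be defined and enabled roughly as
	  follows (the probed function and register fetch argument here are
	  only illustrative and architecture-specific; see kprobetrace.txt
	  for the exact syntax):

	    echo 'p:myprobe do_sys_open dfd=%ax' > /sys/kernel/debug/tracing/kprobe_events
	    echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
	    cat /sys/kernel/debug/tracing/trace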

config DYNAMIC_FTRACE
	bool "enable/disable ftrace tracepoints dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to ftrace dynamically
	  (it will patch them out of the binary image and replace them
	  with a No-Op instruction) as they are called. A table is
	  created to dynamically enable them again.

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

	  The changes to the code are done by a kernel thread that
	  wakes up once a second and checks to see if any ftrace calls
	  were made. If so, it runs stop_machine (which stops all CPUs)
	  and modifies the code to jump over the call to ftrace.
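
	  With dynamic ftrace, tracing can also be limited to a subset of
	  functions. For example, assuming debugfs is mounted at
	  /sys/kernel/debug:

	    echo 'sched_*' > /sys/kernel/debug/tracing/set_ftrace_filter
	    echo function > /sys/kernel/debug/tracing/current_tracer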

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file, profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stats directory; this file shows the list of functions that
	  have been hit and their counters.
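
	  For example, assuming debugfs is mounted at /sys/kernel/debug:

	    echo 1 > /sys/kernel/debug/tracing/function_profile_enabled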

	  If in doubt, say N.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On
	  bootup, tests are run to verify that each configured tracer is
	  functioning properly.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on FTRACE_STARTUP_TEST
	help
	  This option also enables testing of every syscall event.
	  For each event, it enables the event, runs various loads with
	  the event enabled, and then disables it again. This adds a bit
	  more time to kernel boot-up, since it is run for every system
	  call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	        events

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.
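
	  For example, assuming debugfs is mounted at /sys/kernel/debug, it
	  can typically be enabled with:

	    echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe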

	  See Documentation/trace/mmiotrace.txt.
	  If you are not helping to develop drivers, say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.
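
	  When built as a module, the test is typically started by loading
	  the module, e.g. (module name assumed from this option):

	    modprobe ring_buffer_benchmark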

	  If unsure, say N.

endif # FTRACE

endif # TRACING_SUPPORT