#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  The architecture supports the gcc option -pg together with -mfentry.

config HAVE_C_RECORDMCOUNT
	bool
	help
	  The C version of recordmcount is available for this architecture.

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	bool
	select CONTEXT_SWITCH_TRACER
	select GLOB

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

# All tracer options should select GENERIC_TRACER. The options that are
# enabled by all tracers (context switch and event tracer) select TRACING
# instead. This allows those options to appear when no other tracer is
# selected, but to stay hidden when something else selects them. The two
# options GENERIC_TRACER and TRACING are needed to avoid circular
# dependencies while still hiding the automatic options.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select TASKS_RCU if PREEMPT
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function; this NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it is disabled at
	  runtime (the boot-up default), the overhead of the instructions
	  is very small and not measurable even in micro-benchmarks.
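
	  As a rough sketch of runtime use (assuming debugfs is mounted at
	  /sys/kernel/debug), the tracer can be turned on and off with:

	      echo function > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace
	      echo nop > /sys/kernel/debug/tracing/current_tracer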

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry
	  and its return.
	  Its first purpose is to trace the duration of functions and
	  to draw a call graph for each thread, with some information
	  such as the return value. This is done by saving the current
	  return address in a stack of calls kept on the current task
	  structure.
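
	  A quick sketch of runtime use (assuming debugfs is mounted at
	  /sys/kernel/debug):

	      echo function_graph > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace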

config PREEMPTIRQ_EVENTS
	bool "Enable trace events for preempt and irq disable/enable"
	select TRACE_IRQFLAGS
	depends on DEBUG_PREEMPT || !PROVE_LOCKING
	depends on TRACING
	default n
	help
	  Enable tracing of disable and enable events for preemption and irqs.
	  For tracing preempt disable/enable events, DEBUG_PREEMPT must be
	  enabled. For tracing irq disable/enable events, PROVE_LOCKING must
	  be disabled.
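
	  The resulting events are expected to appear under the "preemptirq"
	  group of the trace events directory; as a sketch:

	      echo 1 > /sys/kernel/debug/tracing/events/preemptirq/enable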

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on !ARCH_USES_GETTIMEOFFSET
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re)started at runtime via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)
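
	  To run this tracer (a minimal sketch, assuming debugfs is mounted
	  at /sys/kernel/debug):

	      echo irqsoff > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency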

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on !ARCH_USES_GETTIMEOFFSET
	depends on PREEMPT
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re)started at runtime via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)
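
	  The tracer can be exercised in the same way as the irqs-off
	  tracer, e.g.:

	      echo preemptoff > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency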

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.
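
	  This option provides the "wakeup" family of tracers; a minimal
	  sketch of runtime use (assuming debugfs at /sys/kernel/debug):

	      echo wakeup > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency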

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	help
	  This tracer, when enabled, will create one or more kernel threads,
	  depending on what the cpumask file is set to, with each thread
	  spinning in a loop looking for interruptions caused by
	  something other than the kernel. For example, if a
	  System Management Interrupt (SMI) takes a noticeable amount of
	  time, this tracer will detect it. This is useful for testing
	  whether a system is reliable for Real Time tasks.

	  Some files are created in the tracing directory when this
	  is enabled:

	    hwlat_detector/width   - time in usecs for how long to spin
	    hwlat_detector/window  - time in usecs between the start of each
				     iteration

	  A kernel thread is created that will spin with interrupts disabled
	  for "width" microseconds in every "window" cycle. It will not spin
	  for "window - width" microseconds, during which the system can
	  continue to operate.

	  The output will appear in the trace and trace_pipe files.

	  When the tracer is not running, it has no effect on the system,
	  but when it is running, it can cause the system to be
	  periodically non-responsive. Do not run this tracer on a
	  production system.

	  To enable this tracer, echo "hwlat" into the current_tracer
	  file. Every time a latency is greater than tracing_thresh, it will
	  be recorded into the ring buffer.
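
	  For example (paths assume debugfs is mounted at /sys/kernel/debug;
	  the threshold value is only illustrative):

	      echo hwlat > /sys/kernel/debug/tracing/current_tracer
	      echo 10 > /sys/kernel/debug/tracing/tracing_thresh
	      cat /sys/kernel/debug/tracing/trace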

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks into various trace points in the kernel,
	  allowing the user to pick and choose which trace points they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.
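
	  The individual events then show up under the "syscalls" group of
	  the trace events directory; a simple sketch:

	      echo 1 > /sys/kernel/debug/tracing/events/syscalls/enable
	      cat /sys/kernel/debug/tracing/trace_pipe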

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using
	  the ftrace interface, e.g.:

	      echo 1 > /sys/kernel/debug/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow taking a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After this, only the tracing buffer for CPU 2 is swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to the
	  trace recording, as some checks are needed to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  Branch profiling is a software profiler. It adds hooks into
	  the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyze the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	      /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded, whether the branch was
	  taken or not. The results will be displayed in:

	      /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a significant
	  overhead on the system. This should only be enabled when the
	  system is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likely/unlikely calls are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel.  The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled.
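
	  For example (a minimal sketch):

	      echo 1 > /proc/sys/kernel/stack_tracer_enabled
	      cat /sys/kernel/debug/tracing/stack_trace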

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	      git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	      echo 1 > /sys/block/sda/sda1/trace/enable
	      echo blk > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.
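
	  A rough sketch of use (the probe name and target symbol below are
	  only examples; see the documentation above for the full syntax):

	      echo 'p:myprobe do_sys_open' >> /sys/kernel/debug/tracing/kprobe_events
	      echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
	      cat /sys/kernel/debug/tracing/trace_pipe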

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe
	  subcommand of perf tools on user space applications.
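
	  A sketch only (the binary path and probe offset below are
	  placeholders for a real executable and address):

	      echo 'p:myuprobe /bin/bash:0x4245c0' >> /sys/kernel/debug/tracing/uprobe_events
	      echo 1 > /sys/kernel/debug/tracing/events/uprobes/myuprobe/enable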

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe events.

config PROBE_EVENTS
	def_bool n

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option makes the calls to function tracing dynamic: on
	  boot-up they are patched out of the binary image and replaced
	  with No-Op instructions. At compile time, a table is made of
	  all the locations that ftrace can function-trace, and this table
	  is linked into the kernel image. When this is enabled, functions
	  can be individually enabled, and the functions not enabled will
	  not affect the performance of the system.

	  See the files in /sys/kernel/debug/tracing (example usage below):
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace
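
	  For example, set_ftrace_filter can restrict tracing to functions
	  matching a glob (a minimal sketch):

	      echo 'sched_*' > /sys/kernel/debug/tracing/set_ftrace_filter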

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions
	  that have been hit and their counters.
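
	  A rough sketch of use (assuming debugfs is mounted at
	  /sys/kernel/debug; the per-CPU stat files are typically named
	  function0, function1, ...):

	      echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
	      cat /sys/kernel/debug/tracing/trace_stat/function0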

	  If in doubt, say N.

config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	  Allows BPF to override the execution of a probed function and
	  set a different return value.  This is used for error injection.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On
	  bootup, each configured tracer is exercised to verify that it is
	  functioning properly.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on FTRACE_STARTUP_TEST
	help
	  This option also enables testing of every syscall event. Each
	  event is enabled, exercised with various loads, and then disabled
	  again. This adds a bit more time to kernel boot-up, since it is
	  run for every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	        events

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.
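
	  A minimal sketch of enabling it at run-time:

	      echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace_pipe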

	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers.  It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file.  They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.
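
	  A typical sketch (this particular trigger aggregates kmalloc
	  request sizes by call site, assuming the kmem:kmalloc event is
	  available):

	      echo 'hist:keys=call_site:vals=bytes_req' > \
	          /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
	      cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist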

	  See Documentation/trace/histogram.txt.
	  If in doubt, say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_resched() to let other
	  tasks run), and calls the tracepoint. Each iteration records the
	  time it took to write to the tracepoint, and on the next iteration
	  that data is passed to the tracepoint itself. That is, the
	  tracepoint reports the time it took to do the previous tracepoint
	  write.
	  The string written to the tracepoint is a static string of 128 bytes
	  to keep the time the same. The initial string is simply a write of
	  "START". The second string records the cold cache time of the first
	  write which is not added to the rest of the calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine
	  because we care most about hot paths that are probably in cache
	  already.
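
	  The benchmark is driven simply by enabling the event; as a sketch:

	      echo 1 > /sys/kernel/debug/tracing/events/benchmark/benchmark_event/enable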

	  An example of the output:

	      START
	      first=3672 [COLD CACHED]
	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test starts and kicks off
	  a thread per cpu. Each thread will write various sized events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per-cpu buffer: what
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N.

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space tools
	  that use this string to parse the raw data, as user space does not
	  know how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value. If this macro is used, then
	  the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeof the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong to.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed, as
	  they are needed for the "eval_map" file. Enabling this option will
	  increase the memory footprint of the running kernel.

	  If unsure, say N.

config TRACING_EVENTS_GPIO
	bool "Trace gpio events"
	depends on GPIOLIB
	default y
	help
	  Enable tracing events for the gpio subsystem.

endif # FTRACE

endif # TRACING_SUPPORT