1 
2 #ifndef _LINUX_KERNEL_TRACE_H
3 #define _LINUX_KERNEL_TRACE_H
4 
5 #include <linux/fs.h>
6 #include <linux/atomic.h>
7 #include <linux/sched.h>
8 #include <linux/clocksource.h>
9 #include <linux/ring_buffer.h>
10 #include <linux/mmiotrace.h>
11 #include <linux/tracepoint.h>
12 #include <linux/ftrace.h>
13 #include <linux/hw_breakpoint.h>
14 #include <linux/trace_seq.h>
15 #include <linux/trace_events.h>
16 #include <linux/compiler.h>
18 
19 #ifdef CONFIG_FTRACE_SYSCALLS
20 #include <asm/unistd.h>		/* For NR_SYSCALLS	     */
21 #include <asm/syscall.h>	/* some archs define it here */
22 #endif
23 
24 enum trace_type {
25 	__TRACE_FIRST_TYPE = 0,
26 
27 	TRACE_FN,
28 	TRACE_CTX,
29 	TRACE_WAKE,
30 	TRACE_STACK,
31 	TRACE_PRINT,
32 	TRACE_BPRINT,
33 	TRACE_MMIO_RW,
34 	TRACE_MMIO_MAP,
35 	TRACE_BRANCH,
36 	TRACE_GRAPH_RET,
37 	TRACE_GRAPH_ENT,
38 	TRACE_USER_STACK,
39 	TRACE_BLK,
40 	TRACE_BPUTS,
41 
42 	__TRACE_LAST_TYPE,
43 };
44 
45 
46 #undef __field
47 #define __field(type, item)		type	item;
48 
49 #undef __field_struct
50 #define __field_struct(type, item)	__field(type, item)
51 
52 #undef __field_desc
53 #define __field_desc(type, container, item)
54 
55 #undef __array
56 #define __array(type, item, size)	type	item[size];
57 
58 #undef __array_desc
59 #define __array_desc(type, container, item, size)
60 
61 #undef __dynamic_array
62 #define __dynamic_array(type, item)	type	item[];
63 
64 #undef F_STRUCT
65 #define F_STRUCT(args...)		args
66 
67 #undef FTRACE_ENTRY
68 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
69 	struct struct_name {						\
70 		struct trace_entry	ent;				\
71 		tstruct							\
72 	}
73 
74 #undef FTRACE_ENTRY_DUP
75 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
76 
77 #undef FTRACE_ENTRY_REG
78 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
79 			 filter, regfn) \
80 	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
81 		     filter)
82 
83 #include "trace_entries.h"
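/*
 * For illustration only: with the macros above, an FTRACE_ENTRY()
 * definition from trace_entries.h, such as the function entry, roughly
 * expands to a plain C structure:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 *
 * The same trace_entries.h is included again near the end of this file
 * with FTRACE_ENTRY redefined to declare the matching trace_event_call.
 */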
84 
85 /*
86  * Syscalls are special and need special handling; this is why
87  * they are not included in trace_entries.h.
88  */
89 struct syscall_trace_enter {
90 	struct trace_entry	ent;
91 	int			nr;
92 	unsigned long		args[];
93 };
94 
95 struct syscall_trace_exit {
96 	struct trace_entry	ent;
97 	int			nr;
98 	long			ret;
99 };
100 
101 struct kprobe_trace_entry_head {
102 	struct trace_entry	ent;
103 	unsigned long		ip;
104 };
105 
106 struct kretprobe_trace_entry_head {
107 	struct trace_entry	ent;
108 	unsigned long		func;
109 	unsigned long		ret_ip;
110 };
111 
112 /*
113  * trace_flag_type is an enumeration that holds different
114  * states when a trace occurs. These are:
115  *  IRQS_OFF		- interrupts were disabled
116  *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
117  *  NEED_RESCHED	- reschedule is requested
118  *  HARDIRQ		- inside an interrupt handler
119  *  SOFTIRQ		- inside a softirq handler
 *  PREEMPT_RESCHED	- the preempt_need_resched flag is set
120  */
121 enum trace_flag_type {
122 	TRACE_FLAG_IRQS_OFF		= 0x01,
123 	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
124 	TRACE_FLAG_NEED_RESCHED		= 0x04,
125 	TRACE_FLAG_HARDIRQ		= 0x08,
126 	TRACE_FLAG_SOFTIRQ		= 0x10,
127 	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
128 };
129 
130 #define TRACE_BUF_SIZE		1024
131 
132 struct trace_array;
133 
134 /*
135  * The CPU trace array - it consists of thousands of trace entries
136  * plus some other descriptor data (for example, which task started
137  * the trace).
138  */
139 struct trace_array_cpu {
140 	atomic_t		disabled;
141 	void			*buffer_page;	/* ring buffer spare */
142 
143 	unsigned long		entries;
144 	unsigned long		saved_latency;
145 	unsigned long		critical_start;
146 	unsigned long		critical_end;
147 	unsigned long		critical_sequence;
148 	unsigned long		nice;
149 	unsigned long		policy;
150 	unsigned long		rt_priority;
151 	unsigned long		skipped_entries;
152 	cycle_t			preempt_timestamp;
153 	pid_t			pid;
154 	kuid_t			uid;
155 	char			comm[TASK_COMM_LEN];
156 
157 	bool			ignore_pid;
158 };
159 
160 struct tracer;
161 struct trace_option_dentry;
162 
163 struct trace_buffer {
164 	struct trace_array		*tr;
165 	struct ring_buffer		*buffer;
166 	struct trace_array_cpu __percpu	*data;
167 	cycle_t				time_start;
168 	int				cpu;
169 };
170 
171 #define TRACE_FLAGS_MAX_SIZE		32
172 
173 struct trace_options {
174 	struct tracer			*tracer;
175 	struct trace_option_dentry	*topts;
176 };
177 
178 struct trace_pid_list {
179 	unsigned int			nr_pids;
180 	int				order;
181 	pid_t				*pids;
182 };
183 
184 /*
185  * The trace array - an array of per-CPU trace arrays. This is the
186  * highest level data structure that individual tracers deal with.
187  * Each trace array also has its own on/off state:
188  */
189 struct trace_array {
190 	struct list_head	list;
191 	char			*name;
192 	struct trace_buffer	trace_buffer;
193 #ifdef CONFIG_TRACER_MAX_TRACE
194 	/*
195 	 * The max_buffer is used to snapshot the trace when a maximum
196 	 * latency is reached, or when the user initiates a snapshot.
197 	 * Some tracers will use this to store a maximum trace while
198 	 * they continue examining live traces.
199 	 *
200 	 * The buffers for the max_buffer are set up the same as the trace_buffer.
201 	 * When a snapshot is taken, the buffer of the max_buffer is swapped
202 	 * with the buffer of the trace_buffer and the buffers are reset for
203 	 * the trace_buffer so tracing can continue.
204 	 */
205 	struct trace_buffer	max_buffer;
206 	bool			allocated_snapshot;
207 	unsigned long		max_latency;
208 #endif
209 	struct trace_pid_list	__rcu *filtered_pids;
210 	/*
211 	 * max_lock is used to protect the swapping of buffers
212 	 * when taking a max snapshot. The buffers themselves are
213 	 * protected by per_cpu spinlocks. But the action of the swap
214 	 * needs its own lock.
215 	 *
216 	 * This is defined as an arch_spinlock_t in order to help
217 	 * with performance when lockdep debugging is enabled.
218 	 *
219 	 * It is also used in other places outside of update_max_tr(),
220 	 * so it needs to be defined outside of the
221 	 * CONFIG_TRACER_MAX_TRACE block.
222 	 */
223 	arch_spinlock_t		max_lock;
224 	int			buffer_disabled;
225 #ifdef CONFIG_FTRACE_SYSCALLS
226 	int			sys_refcount_enter;
227 	int			sys_refcount_exit;
228 	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
229 	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
230 #endif
231 	int			stop_count;
232 	int			clock_id;
233 	int			nr_topts;
234 	struct tracer		*current_trace;
235 	unsigned int		trace_flags;
236 	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
237 	unsigned int		flags;
238 	raw_spinlock_t		start_lock;
239 	struct dentry		*dir;
240 	struct dentry		*options;
241 	struct dentry		*percpu_dir;
242 	struct dentry		*event_dir;
243 	struct trace_options	*topts;
244 	struct list_head	systems;
245 	struct list_head	events;
246 	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
247 	int			ref;
248 #ifdef CONFIG_FUNCTION_TRACER
249 	struct ftrace_ops	*ops;
250 	/* function tracing enabled */
251 	int			function_enabled;
252 #endif
253 };
254 
255 enum {
256 	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
257 };
258 
259 extern struct list_head ftrace_trace_arrays;
260 
261 extern struct mutex trace_types_lock;
262 
263 extern int trace_array_get(struct trace_array *tr);
264 extern void trace_array_put(struct trace_array *tr);
265 
266 /*
267  * The global tracer (top) should be the first trace array added,
268  * but we check the flag anyway.
269  */
270 static inline struct trace_array *top_trace_array(void)
271 {
272 	struct trace_array *tr;
273 
274 	if (list_empty(&ftrace_trace_arrays))
275 		return NULL;
276 
277 	tr = list_entry(ftrace_trace_arrays.prev,
278 			typeof(*tr), list);
279 	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
280 	return tr;
281 }
282 
283 #define FTRACE_CMP_TYPE(var, type) \
284 	__builtin_types_compatible_p(typeof(var), type *)
285 
286 #undef IF_ASSIGN
287 #define IF_ASSIGN(var, entry, etype, id)		\
288 	if (FTRACE_CMP_TYPE(var, etype)) {		\
289 		var = (typeof(var))(entry);		\
290 		WARN_ON(id && (entry)->type != id);	\
291 		break;					\
292 	}
293 
294 /* Will cause compile errors if type is not found. */
295 extern void __ftrace_bad_type(void);
296 
297 /*
298  * The trace_assign_type is a verifier that the entry type is
299  * the same as the type being assigned. To add new types simply
300  * add a line with the following format:
301  *
302  * IF_ASSIGN(var, ent, type, id);
303  *
304  *  Where "type" is the trace type that includes the trace_entry
305  *  as the "ent" item. And "id" is the trace identifier that is
306  *  used in the trace_type enum.
307  *
308  *  If the type can have more than one id, then use zero.
309  */
310 #define trace_assign_type(var, ent)					\
311 	do {								\
312 		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
313 		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
314 		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
315 		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
316 		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
317 		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
318 		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
319 		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
320 			  TRACE_MMIO_RW);				\
321 		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
322 			  TRACE_MMIO_MAP);				\
323 		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
324 		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
325 			  TRACE_GRAPH_ENT);		\
326 		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
327 			  TRACE_GRAPH_RET);		\
328 		__ftrace_bad_type();					\
329 	} while (0)
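/*
 * A sketch of how trace_assign_type() is typically used by an output
 * handler (the real callers live in trace_output.c and similar files):
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%lx <-- %lx\n",
 *			 field->ip, field->parent_ip);
 *
 * If "field" had a type without an IF_ASSIGN() line above, the call to
 * __ftrace_bad_type() would remain and the build would fail, since that
 * function is never defined.
 */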
330 
331 /*
332  * An option specific to a tracer. This is a boolean value.
333  * The bit is the bit mask that sets its value in the
334  * flags value (val) of struct tracer_flags.
335  */
336 struct tracer_opt {
337 	const char	*name; /* Will appear on the trace_options file */
338 	u32		bit; /* Mask assigned in val field in tracer_flags */
339 };
340 
341 /*
342  * The set of specific options for a tracer. Your tracer
343  * has to set the initial value of the flags val.
344  */
345 struct tracer_flags {
346 	u32			val;
347 	struct tracer_opt	*opts;
348 };
349 
350 /* Makes it easier to define a tracer opt */
351 #define TRACER_OPT(s, b)	.name = #s, .bit = b
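/*
 * Example of wiring up tracer-private options (illustrative sketch; the
 * option name and bit value below are made up):
 *
 *	#define MY_OPT_VERBOSE		0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(verbose, MY_OPT_VERBOSE) },
 *		{ }	(empty entry terminates the list)
 *	};
 *
 *	static struct tracer_flags my_tracer_flags = {
 *		.val	= 0,		(initial value of the options)
 *		.opts	= my_opts,
 *	};
 *
 * A tracer points its ->flags at my_tracer_flags and reacts to changes
 * from the trace_options file in its ->set_flag() callback.
 */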
352 
353 
354 struct trace_option_dentry {
355 	struct tracer_opt		*opt;
356 	struct tracer_flags		*flags;
357 	struct trace_array		*tr;
358 	struct dentry			*entry;
359 };
360 
361 /**
362  * struct tracer - a specific tracer and its callbacks to interact with tracefs
363  * @name: the name chosen to select it on the available_tracers file
364  * @init: called when one switches to this tracer (echo name > current_tracer)
365  * @reset: called when one switches to another tracer
366  * @start: called when tracing is unpaused (echo 1 > tracing_on)
367  * @stop: called when tracing is paused (echo 0 > tracing_on)
368  * @update_thresh: called when tracing_thresh is updated
369  * @open: called when the trace file is opened
370  * @pipe_open: called when the trace_pipe file is opened
371  * @close: called when the trace file is released
372  * @pipe_close: called when the trace_pipe file is released
373  * @read: override the default read callback on trace_pipe
374  * @splice_read: override the default splice_read callback on trace_pipe
375  * @selftest: selftest to run on boot (see trace_selftest.c)
376  * @print_header: override the first lines that describe your columns
377  * @print_line: callback that prints a trace
378  * @set_flag: signals one of your private flags changed (trace_options file)
379  * @flags: your private flags
380  */
381 struct tracer {
382 	const char		*name;
383 	int			(*init)(struct trace_array *tr);
384 	void			(*reset)(struct trace_array *tr);
385 	void			(*start)(struct trace_array *tr);
386 	void			(*stop)(struct trace_array *tr);
387 	int			(*update_thresh)(struct trace_array *tr);
388 	void			(*open)(struct trace_iterator *iter);
389 	void			(*pipe_open)(struct trace_iterator *iter);
390 	void			(*close)(struct trace_iterator *iter);
391 	void			(*pipe_close)(struct trace_iterator *iter);
392 	ssize_t			(*read)(struct trace_iterator *iter,
393 					struct file *filp, char __user *ubuf,
394 					size_t cnt, loff_t *ppos);
395 	ssize_t			(*splice_read)(struct trace_iterator *iter,
396 					       struct file *filp,
397 					       loff_t *ppos,
398 					       struct pipe_inode_info *pipe,
399 					       size_t len,
400 					       unsigned int flags);
401 #ifdef CONFIG_FTRACE_STARTUP_TEST
402 	int			(*selftest)(struct tracer *trace,
403 					    struct trace_array *tr);
404 #endif
405 	void			(*print_header)(struct seq_file *m);
406 	enum print_line_t	(*print_line)(struct trace_iterator *iter);
407 	/* If you handled the flag setting, return 0 */
408 	int			(*set_flag)(struct trace_array *tr,
409 					    u32 old_flags, u32 bit, int set);
410 	/* Return 0 if OK with change, else return non-zero */
411 	int			(*flag_changed)(struct trace_array *tr,
412 						u32 mask, int set);
413 	struct tracer		*next;
414 	struct tracer_flags	*flags;
415 	int			enabled;
416 	int			ref;
417 	bool			print_max;
418 	bool			allow_instances;
419 #ifdef CONFIG_TRACER_MAX_TRACE
420 	bool			use_max_tr;
421 #endif
422 };
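/*
 * A minimal tracer definition, as a sketch only (the names below are
 * hypothetical; see trace_nop.c for a real in-tree example):
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __tracer_data = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 * Calling register_tracer(&my_tracer) from an initcall makes it
 * selectable with "echo mytracer > current_tracer".
 */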
423 
424 
425 /* Only current can touch trace_recursion */
426 
427 /*
428  * For function tracing recursion:
429  *  The order of these bits is important.
430  *
431  *  When function tracing occurs, the following steps are made:
432  *   If the arch does not support an ftrace feature:
433  *    call internal function (uses INTERNAL bits) which calls...
434  *   If callback is registered to the "global" list, the list
435  *    function is called and recursion checks the GLOBAL bits.
436  *    then this function calls...
437  *   The function callback, which can use the FTRACE bits to
438  *    check for recursion.
439  *
440  * Now if the arch does not support a feature, and it calls
441  * the global list function which calls the ftrace callback,
442  * all three of these steps will do recursion protection.
443  * There's no reason to do one if the previous caller already
444  * did. The recursion that we are protecting against will
445  * go through the same steps again.
446  *
447  * To prevent the multiple recursion checks, if a recursion
448  * bit is set that is higher than the MAX bit of the current
449  * check, then we know that the check was made by the previous
450  * caller, and we can skip the current check.
451  */
452 enum {
453 	TRACE_BUFFER_BIT,
454 	TRACE_BUFFER_NMI_BIT,
455 	TRACE_BUFFER_IRQ_BIT,
456 	TRACE_BUFFER_SIRQ_BIT,
457 
458 	/* Start of function recursion bits */
459 	TRACE_FTRACE_BIT,
460 	TRACE_FTRACE_NMI_BIT,
461 	TRACE_FTRACE_IRQ_BIT,
462 	TRACE_FTRACE_SIRQ_BIT,
463 
464 	/* INTERNAL_BITs must be greater than FTRACE_BITs */
465 	TRACE_INTERNAL_BIT,
466 	TRACE_INTERNAL_NMI_BIT,
467 	TRACE_INTERNAL_IRQ_BIT,
468 	TRACE_INTERNAL_SIRQ_BIT,
469 
470 	TRACE_CONTROL_BIT,
471 
472 	TRACE_BRANCH_BIT,
473 /*
474  * Abuse of the trace_recursion:
475  * we need a way to maintain state if we are tracing the function
476  * graph in irq context, because we want to trace a particular function
477  * that was called in irq context even though we have irq tracing off.
478  * Since this can only be modified by current, we can reuse trace_recursion.
479  */
480 	TRACE_IRQ_BIT,
481 };
482 
483 #define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
484 #define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
485 #define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))
486 
487 #define TRACE_CONTEXT_BITS	4
488 
489 #define TRACE_FTRACE_START	TRACE_FTRACE_BIT
490 #define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
491 
492 #define TRACE_LIST_START	TRACE_INTERNAL_BIT
493 #define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
494 
495 #define TRACE_CONTEXT_MASK	TRACE_LIST_MAX
496 
497 static __always_inline int trace_get_context_bit(void)
498 {
499 	int bit;
500 
501 	if (in_interrupt()) {
502 		if (in_nmi())
503 			bit = 0;
505 		else if (in_irq())
506 			bit = 1;
507 		else
508 			bit = 2;
509 	} else
510 		bit = 3;
511 
512 	return bit;
513 }
514 
515 static __always_inline int trace_test_and_set_recursion(int start, int max)
516 {
517 	unsigned int val = current->trace_recursion;
518 	int bit;
519 
520 	/* A previous recursion check was made */
521 	if ((val & TRACE_CONTEXT_MASK) > max)
522 		return 0;
523 
524 	bit = trace_get_context_bit() + start;
525 	if (unlikely(val & (1 << bit)))
526 		return -1;
527 
528 	val |= 1 << bit;
529 	current->trace_recursion = val;
530 	barrier();
531 
532 	return bit;
533 }
534 
535 static __always_inline void trace_clear_recursion(int bit)
536 {
537 	unsigned int val = current->trace_recursion;
538 
539 	if (!bit)
540 		return;
541 
542 	bit = 1 << bit;
543 	val &= ~bit;
544 
545 	barrier();
546 	current->trace_recursion = val;
547 }
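/*
 * The helpers above bracket a trace callback so it cannot recurse into
 * itself from the same context. A sketch of the usual pattern (the
 * ftrace_ops list function in ftrace.c does this):
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
 *	if (bit < 0)
 *		return;			(already tracing in this context)
 *
 *	(... do the actual tracing work ...)
 *
 *	trace_clear_recursion(bit);
 */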
548 
549 static inline struct ring_buffer_iter *
550 trace_buffer_iter(struct trace_iterator *iter, int cpu)
551 {
552 	if (iter->buffer_iter && iter->buffer_iter[cpu])
553 		return iter->buffer_iter[cpu];
554 	return NULL;
555 }
556 
557 int tracer_init(struct tracer *t, struct trace_array *tr);
558 int tracing_is_enabled(void);
559 void tracing_reset(struct trace_buffer *buf, int cpu);
560 void tracing_reset_online_cpus(struct trace_buffer *buf);
561 void tracing_reset_current(int cpu);
562 void tracing_reset_all_online_cpus(void);
563 int tracing_open_generic(struct inode *inode, struct file *filp);
564 bool tracing_is_disabled(void);
565 struct dentry *trace_create_file(const char *name,
566 				 umode_t mode,
567 				 struct dentry *parent,
568 				 void *data,
569 				 const struct file_operations *fops);
570 
571 struct dentry *tracing_init_dentry(void);
572 
573 struct ring_buffer_event;
574 
575 struct ring_buffer_event *
576 trace_buffer_lock_reserve(struct ring_buffer *buffer,
577 			  int type,
578 			  unsigned long len,
579 			  unsigned long flags,
580 			  int pc);
581 
582 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
583 						struct trace_array_cpu *data);
584 
585 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
586 					  int *ent_cpu, u64 *ent_ts);
587 
588 void __buffer_unlock_commit(struct ring_buffer *buffer,
589 			    struct ring_buffer_event *event);
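/*
 * Writing an event into the ring buffer follows a reserve/fill/commit
 * pattern. Sketch only; compare trace_function() in trace.c:
 *
 *	struct ring_buffer_event *event;
 *	struct ftrace_entry *entry;
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 *					  flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip	 = ip;
 *	entry->parent_ip = parent_ip;
 *	__buffer_unlock_commit(buffer, event);
 */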
590 
591 int trace_empty(struct trace_iterator *iter);
592 
593 void *trace_find_next_entry_inc(struct trace_iterator *iter);
594 
595 void trace_init_global_iter(struct trace_iterator *iter);
596 
597 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
598 
599 void trace_function(struct trace_array *tr,
600 		    unsigned long ip,
601 		    unsigned long parent_ip,
602 		    unsigned long flags, int pc);
603 void trace_graph_function(struct trace_array *tr,
604 		    unsigned long ip,
605 		    unsigned long parent_ip,
606 		    unsigned long flags, int pc);
607 void trace_latency_header(struct seq_file *m);
608 void trace_default_header(struct seq_file *m);
609 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
611 
612 void trace_graph_return(struct ftrace_graph_ret *trace);
613 int trace_graph_entry(struct ftrace_graph_ent *trace);
614 void set_graph_array(struct trace_array *tr);
615 
616 void tracing_start_cmdline_record(void);
617 void tracing_stop_cmdline_record(void);
618 int register_tracer(struct tracer *type);
619 int is_tracing_stopped(void);
620 
621 loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
622 
623 extern cpumask_var_t __read_mostly tracing_buffer_mask;
624 
625 #define for_each_tracing_cpu(cpu)	\
626 	for_each_cpu(cpu, tracing_buffer_mask)
627 
628 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
629 
630 extern unsigned long tracing_thresh;
631 
632 #ifdef CONFIG_TRACER_MAX_TRACE
633 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
634 void update_max_tr_single(struct trace_array *tr,
635 			  struct task_struct *tsk, int cpu);
636 #endif /* CONFIG_TRACER_MAX_TRACE */
637 
638 #ifdef CONFIG_STACKTRACE
639 void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
640 			    int pc);
641 
642 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
643 		   int pc);
644 #else
645 static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
646 					  unsigned long flags, int pc)
647 {
648 }
649 
650 static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
651 				 int skip, int pc)
652 {
653 }
654 #endif /* CONFIG_STACKTRACE */
655 
656 extern cycle_t ftrace_now(int cpu);
657 
658 extern void trace_find_cmdline(int pid, char comm[]);
659 
660 #ifdef CONFIG_DYNAMIC_FTRACE
661 extern unsigned long ftrace_update_tot_cnt;
662 #endif
663 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
664 extern int DYN_FTRACE_TEST_NAME(void);
665 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
666 extern int DYN_FTRACE_TEST_NAME2(void);
667 
668 extern bool ring_buffer_expanded;
669 extern bool tracing_selftest_disabled;
670 DECLARE_PER_CPU(int, ftrace_cpu_disabled);
671 
672 #ifdef CONFIG_FTRACE_STARTUP_TEST
673 extern int trace_selftest_startup_function(struct tracer *trace,
674 					   struct trace_array *tr);
675 extern int trace_selftest_startup_function_graph(struct tracer *trace,
676 						 struct trace_array *tr);
677 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
678 					  struct trace_array *tr);
679 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
680 					     struct trace_array *tr);
681 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
682 						 struct trace_array *tr);
683 extern int trace_selftest_startup_wakeup(struct tracer *trace,
684 					 struct trace_array *tr);
685 extern int trace_selftest_startup_nop(struct tracer *trace,
686 					 struct trace_array *tr);
687 extern int trace_selftest_startup_sched_switch(struct tracer *trace,
688 					       struct trace_array *tr);
689 extern int trace_selftest_startup_branch(struct tracer *trace,
690 					 struct trace_array *tr);
691 /*
692  * Tracer data references selftest functions that only occur
693  * on boot up. These can be __init functions. Thus, when selftests
694  * are enabled, the tracers need to reference __init functions.
695  */
696 #define __tracer_data		__refdata
697 #else
698 /* Tracers are seldom changed. Optimize when selftests are disabled. */
699 #define __tracer_data		__read_mostly
700 #endif /* CONFIG_FTRACE_STARTUP_TEST */
701 
702 extern void *head_page(struct trace_array_cpu *data);
703 extern unsigned long long ns2usecs(cycle_t nsec);
704 extern int
705 trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
706 extern int
707 trace_vprintk(unsigned long ip, const char *fmt, va_list args);
708 extern int
709 trace_array_vprintk(struct trace_array *tr,
710 		    unsigned long ip, const char *fmt, va_list args);
711 int trace_array_printk(struct trace_array *tr,
712 		       unsigned long ip, const char *fmt, ...);
713 int trace_array_printk_buf(struct ring_buffer *buffer,
714 			   unsigned long ip, const char *fmt, ...);
715 void trace_printk_seq(struct trace_seq *s);
716 enum print_line_t print_trace_line(struct trace_iterator *iter);
717 
718 extern char trace_find_mark(unsigned long long duration);
719 
720 /* Standard output formatting function used for function return traces */
721 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
722 
723 /* Flag options */
724 #define TRACE_GRAPH_PRINT_OVERRUN       0x1
725 #define TRACE_GRAPH_PRINT_CPU           0x2
726 #define TRACE_GRAPH_PRINT_OVERHEAD      0x4
727 #define TRACE_GRAPH_PRINT_PROC          0x8
728 #define TRACE_GRAPH_PRINT_DURATION      0x10
729 #define TRACE_GRAPH_PRINT_ABS_TIME      0x20
730 #define TRACE_GRAPH_PRINT_IRQS          0x40
731 #define TRACE_GRAPH_PRINT_TAIL          0x80
732 #define TRACE_GRAPH_SLEEP_TIME		0x100
733 #define TRACE_GRAPH_GRAPH_TIME		0x200
734 #define TRACE_GRAPH_PRINT_FILL_SHIFT	28
735 #define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
736 
737 extern void ftrace_graph_sleep_time_control(bool enable);
738 extern void ftrace_graph_graph_time_control(bool enable);
739 
740 extern enum print_line_t
741 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
742 extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
743 extern void
744 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
745 extern void graph_trace_open(struct trace_iterator *iter);
746 extern void graph_trace_close(struct trace_iterator *iter);
747 extern int __trace_graph_entry(struct trace_array *tr,
748 			       struct ftrace_graph_ent *trace,
749 			       unsigned long flags, int pc);
750 extern void __trace_graph_return(struct trace_array *tr,
751 				 struct ftrace_graph_ret *trace,
752 				 unsigned long flags, int pc);
753 
754 
755 #ifdef CONFIG_DYNAMIC_FTRACE
756 /* TODO: make this variable */
757 #define FTRACE_GRAPH_MAX_FUNCS		32
758 extern int ftrace_graph_count;
759 extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
760 extern int ftrace_graph_notrace_count;
761 extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
762 
763 static inline int ftrace_graph_addr(unsigned long addr)
764 {
765 	int i;
766 
767 	if (!ftrace_graph_count)
768 		return 1;
769 
770 	for (i = 0; i < ftrace_graph_count; i++) {
771 		if (addr == ftrace_graph_funcs[i]) {
772 			/*
773 			 * If no irqs are to be traced, but a set_graph_function
774 			 * is set and is called from an interrupt handler, we still
775 			 * want to trace it.
776 			 */
777 			if (in_irq())
778 				trace_recursion_set(TRACE_IRQ_BIT);
779 			else
780 				trace_recursion_clear(TRACE_IRQ_BIT);
781 			return 1;
782 		}
783 	}
784 
785 	return 0;
786 }
787 
788 static inline int ftrace_graph_notrace_addr(unsigned long addr)
789 {
790 	int i;
791 
792 	if (!ftrace_graph_notrace_count)
793 		return 0;
794 
795 	for (i = 0; i < ftrace_graph_notrace_count; i++) {
796 		if (addr == ftrace_graph_notrace_funcs[i])
797 			return 1;
798 	}
799 
800 	return 0;
801 }
802 #else
803 static inline int ftrace_graph_addr(unsigned long addr)
804 {
805 	return 1;
806 }
807 
808 static inline int ftrace_graph_notrace_addr(unsigned long addr)
809 {
810 	return 0;
811 }
812 #endif /* CONFIG_DYNAMIC_FTRACE */
813 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
814 static inline enum print_line_t
815 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
816 {
817 	return TRACE_TYPE_UNHANDLED;
818 }
819 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
820 
821 extern struct list_head ftrace_pids;
822 
823 #ifdef CONFIG_FUNCTION_TRACER
824 extern bool ftrace_filter_param __initdata;
825 static inline int ftrace_trace_task(struct task_struct *task)
826 {
827 	if (list_empty(&ftrace_pids))
828 		return 1;
829 
830 	return test_tsk_trace_trace(task);
831 }
832 extern int ftrace_is_dead(void);
833 int ftrace_create_function_files(struct trace_array *tr,
834 				 struct dentry *parent);
835 void ftrace_destroy_function_files(struct trace_array *tr);
836 void ftrace_init_global_array_ops(struct trace_array *tr);
837 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
838 void ftrace_reset_array_ops(struct trace_array *tr);
839 int using_ftrace_ops_list_func(void);
840 #else
841 static inline int ftrace_trace_task(struct task_struct *task)
842 {
843 	return 1;
844 }
845 static inline int ftrace_is_dead(void) { return 0; }
846 static inline int
847 ftrace_create_function_files(struct trace_array *tr,
848 			     struct dentry *parent)
849 {
850 	return 0;
851 }
852 static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
853 static inline __init void
854 ftrace_init_global_array_ops(struct trace_array *tr) { }
855 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
856 /* ftrace_func_t type is not defined, use macro instead of static inline */
857 #define ftrace_init_array_ops(tr, func) do { } while (0)
858 #endif /* CONFIG_FUNCTION_TRACER */
859 
860 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
861 void ftrace_create_filter_files(struct ftrace_ops *ops,
862 				struct dentry *parent);
863 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
864 #else
865 /*
866  * The ops parameter passed in is usually undefined.
867  * This must be a macro.
868  */
869 #define ftrace_create_filter_files(ops, parent) do { } while (0)
870 #define ftrace_destroy_filter_files(ops) do { } while (0)
871 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
872 
873 bool ftrace_event_is_function(struct trace_event_call *call);
874 
875 /*
876  * struct trace_parser - helper for reading user input separated by spaces
877  * @cont: set if the input is not complete - no final space char was found
878  * @buffer: holds the parsed user input
879  * @idx: user input length
880  * @size: buffer size
881  */
882 struct trace_parser {
883 	bool		cont;
884 	char		*buffer;
885 	unsigned	idx;
886 	unsigned	size;
887 };
888 
889 static inline bool trace_parser_loaded(struct trace_parser *parser)
890 {
891 	return (parser->idx != 0);
892 }
893 
894 static inline bool trace_parser_cont(struct trace_parser *parser)
895 {
896 	return parser->cont;
897 }
898 
899 static inline void trace_parser_clear(struct trace_parser *parser)
900 {
901 	parser->cont = false;
902 	parser->idx = 0;
903 }
904 
905 extern int trace_parser_get_init(struct trace_parser *parser, int size);
906 extern void trace_parser_put(struct trace_parser *parser);
907 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
908 	size_t cnt, loff_t *ppos);
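/*
 * A rough sketch of how the parser helpers are combined by a write()
 * handler on a tracing control file (the *_write() handlers in ftrace.c
 * follow this shape); MY_BUF_MAX is a made-up size:
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, MY_BUF_MAX))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		(act on parser.buffer / parser.idx, then)
 *		trace_parser_clear(&parser);
 *	}
 *
 *	trace_parser_put(&parser);
 */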
909 
910 /*
911  * Only create function graph options if function graph is configured.
912  */
913 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
914 # define FGRAPH_FLAGS						\
915 		C(DISPLAY_GRAPH,	"display-graph"),
916 #else
917 # define FGRAPH_FLAGS
918 #endif
919 
920 #ifdef CONFIG_BRANCH_TRACER
921 # define BRANCH_FLAGS					\
922 		C(BRANCH,		"branch"),
923 #else
924 # define BRANCH_FLAGS
925 #endif
926 
927 #ifdef CONFIG_FUNCTION_TRACER
928 # define FUNCTION_FLAGS						\
929 		C(FUNCTION,		"function-trace"),
930 # define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
931 #else
932 # define FUNCTION_FLAGS
933 # define FUNCTION_DEFAULT_FLAGS		0UL
934 #endif
935 
936 #ifdef CONFIG_STACKTRACE
937 # define STACK_FLAGS				\
938 		C(STACKTRACE,		"stacktrace"),
939 #else
940 # define STACK_FLAGS
941 #endif
942 
943 /*
944  * trace_iterator_flags is an enumeration that defines bit
945  * positions into trace_flags that control the output.
946  *
947  * NOTE: These bits must match the trace_options array in
948  *       trace.c (this macro guarantees it).
949  */
950 #define TRACE_FLAGS						\
951 		C(PRINT_PARENT,		"print-parent"),	\
952 		C(SYM_OFFSET,		"sym-offset"),		\
953 		C(SYM_ADDR,		"sym-addr"),		\
954 		C(VERBOSE,		"verbose"),		\
955 		C(RAW,			"raw"),			\
956 		C(HEX,			"hex"),			\
957 		C(BIN,			"bin"),			\
958 		C(BLOCK,		"block"),		\
959 		C(PRINTK,		"trace_printk"),	\
960 		C(ANNOTATE,		"annotate"),		\
961 		C(USERSTACKTRACE,	"userstacktrace"),	\
962 		C(SYM_USEROBJ,		"sym-userobj"),		\
963 		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
964 		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
965 		C(LATENCY_FMT,		"latency-format"),	\
966 		C(RECORD_CMD,		"record-cmd"),		\
967 		C(OVERWRITE,		"overwrite"),		\
968 		C(STOP_ON_FREE,		"disable_on_free"),	\
969 		C(IRQ_INFO,		"irq-info"),		\
970 		C(MARKERS,		"markers"),		\
971 		FUNCTION_FLAGS					\
972 		FGRAPH_FLAGS					\
973 		STACK_FLAGS					\
974 		BRANCH_FLAGS
975 
976 /*
977  * By defining C, we can make TRACE_FLAGS a list of bit names
978  * that will define the bits for the flag masks.
979  */
980 #undef C
981 #define C(a, b) TRACE_ITER_##a##_BIT
982 
983 enum trace_iterator_bits {
984 	TRACE_FLAGS
985 	/* Make sure we don't go more than we have bits for */
986 	TRACE_ITER_LAST_BIT
987 };
988 
989 /*
990  * By redefining C, we can make TRACE_FLAGS a list of masks that
991  * use the bits as defined above.
992  */
993 #undef C
994 #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
995 
996 enum trace_iterator_flags { TRACE_FLAGS };
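/*
 * To make the C() trick above concrete: the first definition turns
 * C(PRINT_PARENT, "print-parent") into TRACE_ITER_PRINT_PARENT_BIT,
 * and the second into
 * TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT).
 * trace.c redefines C(a, b) once more to expand to just the string b,
 * so the same TRACE_FLAGS list also builds the trace_options name
 * array, keeping the option names and bits in sync.
 */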
997 
998 /*
999  * TRACE_ITER_SYM_MASK masks the options in trace_flags that
1000  * control the output of kernel symbols.
1001  */
1002 #define TRACE_ITER_SYM_MASK \
1003 	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1004 
1005 extern struct tracer nop_trace;
1006 
1007 #ifdef CONFIG_BRANCH_TRACER
1008 extern int enable_branch_tracing(struct trace_array *tr);
1009 extern void disable_branch_tracing(void);
1010 static inline int trace_branch_enable(struct trace_array *tr)
1011 {
1012 	if (tr->trace_flags & TRACE_ITER_BRANCH)
1013 		return enable_branch_tracing(tr);
1014 	return 0;
1015 }
1016 static inline void trace_branch_disable(void)
1017 {
1018 	/* due to races, always disable */
1019 	disable_branch_tracing();
1020 }
1021 #else
1022 static inline int trace_branch_enable(struct trace_array *tr)
1023 {
1024 	return 0;
1025 }
1026 static inline void trace_branch_disable(void)
1027 {
1028 }
1029 #endif /* CONFIG_BRANCH_TRACER */
1030 
1031 /* set ring buffers to default size if not already done */
1032 int tracing_update_buffers(void);
1033 
1034 struct ftrace_event_field {
1035 	struct list_head	link;
1036 	const char		*name;
1037 	const char		*type;
1038 	int			filter_type;
1039 	int			offset;
1040 	int			size;
1041 	int			is_signed;
1042 };
1043 
1044 struct event_filter {
1045 	int			n_preds;	/* Number assigned */
1046 	int			a_preds;	/* allocated */
1047 	struct filter_pred	*preds;
1048 	struct filter_pred	*root;
1049 	char			*filter_string;
1050 };
1051 
1052 struct event_subsystem {
1053 	struct list_head	list;
1054 	const char		*name;
1055 	struct event_filter	*filter;
1056 	int			ref_count;
1057 };
1058 
1059 struct trace_subsystem_dir {
1060 	struct list_head		list;
1061 	struct event_subsystem		*subsystem;
1062 	struct trace_array		*tr;
1063 	struct dentry			*entry;
1064 	int				ref_count;
1065 	int				nr_events;
1066 };
1067 
1068 #define FILTER_PRED_INVALID	((unsigned short)-1)
1069 #define FILTER_PRED_IS_RIGHT	(1 << 15)
1070 #define FILTER_PRED_FOLD	(1 << 15)
1071 
1072 /*
1073  * The max preds is the size of unsigned short with
1074  * two flags at the MSBs. One bit is used for both the IS_RIGHT
1075  * and FOLD flags. The other is reserved.
1076  *
1077  * 2^14 preds is way more than enough.
1078  */
1079 #define MAX_FILTER_PRED		16384
1080 
1081 struct filter_pred;
1082 struct regex;
1083 
1084 typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1085 
1086 typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1087 
1088 enum regex_type {
1089 	MATCH_FULL = 0,
1090 	MATCH_FRONT_ONLY,
1091 	MATCH_MIDDLE_ONLY,
1092 	MATCH_END_ONLY,
1093 };
1094 
1095 struct regex {
1096 	char			pattern[MAX_FILTER_STR_VAL];
1097 	int			len;
1098 	int			field_len;
1099 	regex_match_func	match;
1100 };
1101 
1102 struct filter_pred {
1103 	filter_pred_fn_t 	fn;
1104 	u64 			val;
1105 	struct regex		regex;
1106 	unsigned short		*ops;
1107 	struct ftrace_event_field *field;
1108 	int 			offset;
1109 	int 			not;
1110 	int 			op;
1111 	unsigned short		index;
1112 	unsigned short		parent;
1113 	unsigned short		left;
1114 	unsigned short		right;
1115 };
1116 
1117 extern enum regex_type
1118 filter_parse_regex(char *buff, int len, char **search, int *not);
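/*
 * filter_parse_regex() classifies a glob for the simple matchers above.
 * Roughly: "foo" gives MATCH_FULL, "foo*" gives MATCH_FRONT_ONLY,
 * "*foo" gives MATCH_END_ONLY, "*foo*" gives MATCH_MIDDLE_ONLY, and a
 * leading '!' sets *not; *search points at the text with the '*'s and
 * the '!' stripped.
 */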
1119 extern void print_event_filter(struct trace_event_file *file,
1120 			       struct trace_seq *s);
1121 extern int apply_event_filter(struct trace_event_file *file,
1122 			      char *filter_string);
1123 extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1124 					char *filter_string);
1125 extern void print_subsystem_event_filter(struct event_subsystem *system,
1126 					 struct trace_seq *s);
1127 extern int filter_assign_type(const char *type);
1128 extern int create_event_filter(struct trace_event_call *call,
1129 			       char *filter_str, bool set_str,
1130 			       struct event_filter **filterp);
1131 extern void free_event_filter(struct event_filter *filter);
1132 
1133 struct ftrace_event_field *
1134 trace_find_event_field(struct trace_event_call *call, char *name);
1135 
1136 extern void trace_event_enable_cmd_record(bool enable);
1137 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1138 extern int event_trace_del_tracer(struct trace_array *tr);
1139 
1140 extern struct trace_event_file *find_event_file(struct trace_array *tr,
1141 						const char *system,
1142 						const char *event);
1143 
1144 static inline void *event_file_data(struct file *filp)
1145 {
1146 	return ACCESS_ONCE(file_inode(filp)->i_private);
1147 }
1148 
1149 extern struct mutex event_mutex;
1150 extern struct list_head ftrace_events;
1151 
1152 extern const struct file_operations event_trigger_fops;
1153 
1154 extern int register_trigger_cmds(void);
1155 extern void clear_event_triggers(struct trace_array *tr);
1156 
1157 struct event_trigger_data {
1158 	unsigned long			count;
1159 	int				ref;
1160 	struct event_trigger_ops	*ops;
1161 	struct event_command		*cmd_ops;
1162 	struct event_filter __rcu	*filter;
1163 	char				*filter_str;
1164 	void				*private_data;
1165 	struct list_head		list;
1166 };
1167 
1168 /**
1169  * struct event_trigger_ops - callbacks for trace event triggers
1170  *
1171  * The methods in this structure provide per-event trigger hooks for
1172  * various trigger operations.
1173  *
1174  * All the methods below, except for @init() and @free(), must be
1175  * implemented.
1176  *
1177  * @func: The trigger 'probe' function called when the triggering
1178  *	event occurs.  The data passed into this callback is the data
1179  *	that was supplied to the event_command @reg() function that
1180  *	registered the trigger (see struct event_command).
1181  *
1182  * @init: An optional initialization function called for the trigger
1183  *	when the trigger is registered (via the event_command reg()
1184  *	function).  This can be used to perform per-trigger
1185  *	initialization such as incrementing a per-trigger reference
1186  *	count, for instance.  This is usually implemented by the
1187  *	generic utility function @event_trigger_init() (see
1188  *	trace_event_triggers.c).
1189  *
1190  * @free: An optional de-initialization function called for the
1191  *	trigger when the trigger is unregistered (via the
1192  *	event_command @reg() function).  This can be used to perform
1193  *	per-trigger de-initialization such as decrementing a
1194  *	per-trigger reference count and freeing corresponding trigger
1195  *	data, for instance.  This is usually implemented by the
1196  *	generic utility function @event_trigger_free() (see
1197  *	trace_event_triggers.c).
1198  *
1199  * @print: The callback function invoked to have the trigger print
1200  *	itself.  This is usually implemented by a wrapper function
1201  *	that calls the generic utility function @event_trigger_print()
1202  *	(see trace_event_triggers.c).
1203  */
1204 struct event_trigger_ops {
1205 	void			(*func)(struct event_trigger_data *data);
1206 	int			(*init)(struct event_trigger_ops *ops,
1207 					struct event_trigger_data *data);
1208 	void			(*free)(struct event_trigger_ops *ops,
1209 					struct event_trigger_data *data);
1210 	int			(*print)(struct seq_file *m,
1211 					 struct event_trigger_ops *ops,
1212 					 struct event_trigger_data *data);
1213 };
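/*
 * A trigger normally supplies a small ops instance wired to the generic
 * helpers in trace_events_trigger.c. Sketch only; my_trigger_probe and
 * my_trigger_print are hypothetical:
 *
 *	static struct event_trigger_ops my_trigger_ops = {
 *		.func	= my_trigger_probe,
 *		.print	= my_trigger_print,
 *		.init	= event_trigger_init,
 *		.free	= event_trigger_free,
 *	};
 */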
1214 
1215 /**
1216  * struct event_command - callbacks and data members for event commands
1217  *
1218  * Event commands are invoked by users by writing the command name
1219  * into the 'trigger' file associated with a trace event.  The
1220  * parameters associated with a specific invocation of an event
1221  * command are used to create an event trigger instance, which is
1222  * added to the list of trigger instances associated with that trace
1223  * event.  When the event is hit, the set of triggers associated with
1224  * that event is invoked.
1225  *
1226  * The data members in this structure provide per-event command data
1227  * for various event commands.
1228  *
1229  * All the data members below, except for @post_trigger, must be set
1230  * for each event command.
1231  *
1232  * @name: The unique name that identifies the event command.  This is
1233  *	the name used when setting triggers via trigger files.
1234  *
1235  * @trigger_type: A unique id that identifies the event command
1236  *	'type'.  This value has two purposes, the first to ensure that
1237  *	only one trigger of the same type can be set at a given time
1238  *	for a particular event e.g. it doesn't make sense to have both
1239  *	a traceon and traceoff trigger attached to a single event at
1240  *	the same time, so traceon and traceoff have the same type
1241  *	though they have different names.  The @trigger_type value is
1242  *	also used as a bit value for deferring the actual trigger
1243  *	action until after the current event is finished.  Some
1244  *	commands need to do this if they themselves log to the trace
1245  *	buffer (see the @post_trigger() member below).  @trigger_type
1246  *	values are defined by adding new values to the trigger_type
1247  *	enum in include/linux/trace_events.h.
1248  *
1249  * @post_trigger: A flag that says whether or not this command needs
1250  *	to have its action delayed until after the current event has
1251  *	been closed.  Some triggers need to avoid being invoked while
1252  *	an event is currently in the process of being logged, since
1253  *	the trigger may itself log data into the trace buffer.  Thus
1254  *	we make sure the current event is committed before invoking
1255  *	those triggers.  To do that, the trigger invocation is split
1256  *	in two - the first part checks the filter using the current
1257  *	trace record; if a command has the @post_trigger flag set, it
1258  *	sets a bit for itself in the return value, otherwise it
1259  *	directly invokes the trigger.  Once all commands have been
1260  *	either invoked or set their return flag, the current record is
1261  *	either committed or discarded.  At that point, if any commands
1262  *	have deferred their triggers, those commands are finally
1263  *	invoked following the close of the current event.  In other
1264  *	words, if the event_trigger_ops @func() probe implementation
1265  *	itself logs to the trace buffer, this flag should be set,
1266  *	otherwise it can be left unspecified.
1267  *
1268  * All the methods below, except for @set_filter(), must be
1269  * implemented.
1270  *
1271  * @func: The callback function responsible for parsing and
1272  *	registering the trigger written to the 'trigger' file by the
1273  *	user.  It allocates the trigger instance and registers it with
1274  *	the appropriate trace event.  It makes use of the other
1275  *	event_command callback functions to orchestrate this, and is
1276  *	usually implemented by the generic utility function
1277  *	@event_trigger_callback() (see trace_event_triggers.c).
1278  *
1279  * @reg: Adds the trigger to the list of triggers associated with the
1280  *	event, and enables the event trigger itself, after
1281  *	initializing it (via the event_trigger_ops @init() function).
1282  *	This is also where commands can use the @trigger_type value to
1283  *	make the decision as to whether or not multiple instances of
1284  *	the trigger should be allowed.  This is usually implemented by
1285  *	the generic utility function @register_trigger() (see
1286  *	trace_event_triggers.c).
1287  *
1288  * @unreg: Removes the trigger from the list of triggers associated
1289  *	with the event, and disables the event trigger itself, then
1290  *	releases it (via the event_trigger_ops @free() function).
1291  *	This is usually implemented by the generic utility function
1292  *	@unregister_trigger() (see trace_event_triggers.c).
1293  *
1294  * @set_filter: An optional function called to parse and set a filter
1295  *	for the trigger.  If no @set_filter() method is set for the
1296  *	event command, filters set by the user for the command will be
1297  *	ignored.  This is usually implemented by the generic utility
1298  *	function @set_trigger_filter() (see trace_event_triggers.c).
1299  *
1300  * @get_trigger_ops: The callback function invoked to retrieve the
1301  *	event_trigger_ops implementation associated with the command.
1302  */
1303 struct event_command {
1304 	struct list_head	list;
1305 	char			*name;
1306 	enum event_trigger_type	trigger_type;
1307 	bool			post_trigger;
1308 	int			(*func)(struct event_command *cmd_ops,
1309 					struct trace_event_file *file,
1310 					char *glob, char *cmd, char *params);
1311 	int			(*reg)(char *glob,
1312 				       struct event_trigger_ops *ops,
1313 				       struct event_trigger_data *data,
1314 				       struct trace_event_file *file);
1315 	void			(*unreg)(char *glob,
1316 					 struct event_trigger_ops *ops,
1317 					 struct event_trigger_data *data,
1318 					 struct trace_event_file *file);
1319 	int			(*set_filter)(char *filter_str,
1320 					      struct event_trigger_data *data,
1321 					      struct trace_event_file *file);
1322 	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1323 };
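/*
 * A command is defined once and registered at boot. Sketch only (the
 * names "mycmd", ETT_MY_CMD and my_get_trigger_ops are hypothetical;
 * see the traceon/traceoff commands in trace_events_trigger.c for real
 * ones):
 *
 *	static struct event_command my_cmd = {
 *		.name			= "mycmd",
 *		.trigger_type		= ETT_MY_CMD,
 *		.func			= event_trigger_callback,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.set_filter		= set_trigger_filter,
 *		.get_trigger_ops	= my_get_trigger_ops,
 *	};
 *
 * The built-in commands are added to the command list by
 * register_trigger_cmds() (declared above) at boot.
 */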
1324 
1325 extern int trace_event_enable_disable(struct trace_event_file *file,
1326 				      int enable, int soft_disable);
1327 extern int tracing_alloc_snapshot(void);
1328 
1329 extern const char *__start___trace_bprintk_fmt[];
1330 extern const char *__stop___trace_bprintk_fmt[];
1331 
1332 extern const char *__start___tracepoint_str[];
1333 extern const char *__stop___tracepoint_str[];
1334 
1335 void trace_printk_control(bool enabled);
1336 void trace_printk_init_buffers(void);
1337 void trace_printk_start_comm(void);
1338 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1339 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1340 
1341 /*
1342  * Normal trace_printk() and friends allocate special buffers
1343  * to do the manipulation, as well as save the print formats
1344  * into sections for display. But the trace infrastructure wants
1345  * to use these without the added overhead, at the price of being
1346  * a bit slower (used mainly for warnings, where we don't care
1347  * about performance). internal_trace_puts() is for such
1348  * a purpose.
1349  */
1350 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
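/*
 * For example, the snapshot code uses this to leave a marker in the
 * ring buffer when a snapshot is requested without a snapshot buffer,
 * roughly:
 *
 *	internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
 */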
1351 
1352 #undef FTRACE_ENTRY
1353 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
1354 	extern struct trace_event_call					\
1355 	__aligned(4) event_##call;
1356 #undef FTRACE_ENTRY_DUP
1357 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter)	\
1358 	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1359 		     filter)
1360 #include "trace_entries.h"
1361 
1362 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1363 int perf_ftrace_event_register(struct trace_event_call *call,
1364 			       enum trace_reg type, void *data);
1365 #else
1366 #define perf_ftrace_event_register NULL
1367 #endif
1368 
1369 #ifdef CONFIG_FTRACE_SYSCALLS
1370 void init_ftrace_syscalls(void);
1371 #else
1372 static inline void init_ftrace_syscalls(void) { }
1373 #endif
1374 
1375 #ifdef CONFIG_EVENT_TRACING
1376 void trace_event_init(void);
1377 void trace_event_enum_update(struct trace_enum_map **map, int len);
1378 #else
1379 static inline void __init trace_event_init(void) { }
1380 static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
1381 #endif
1382 
1383 extern struct trace_iterator *tracepoint_print_iter;
1384 
1385 #endif /* _LINUX_KERNEL_TRACE_H */
1386