xref: /linux/kernel/trace/trace.c (revision 6e59bcc9c8adec9a5bbedfa95a89946c56c510d9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ring buffer based function tracer
4  *
5  * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7  *
8  * Originally taken from the RT patch by:
9  *    Arnaldo Carvalho de Melo <acme@redhat.com>
10  *
11  * Based on code from the latency_tracer, that is:
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 #include <linux/ring_buffer.h>
16 #include <linux/utsname.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/panic_notifier.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
44 #include <linux/fs.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
51 
52 #include <asm/setup.h> /* COMMAND_LINE_SIZE */
53 
54 #include "trace.h"
55 #include "trace_output.h"
56 
57 #ifdef CONFIG_FTRACE_STARTUP_TEST
58 /*
59  * We need to change this state when a selftest is running.
60  * A selftest will lurk into the ring-buffer to count the
61  * entries inserted during the selftest although some concurrent
62  * insertions into the ring-buffer such as trace_printk could occurred
63  * at the same time, giving false positive or negative results.
64  */
65 static bool __read_mostly tracing_selftest_running;
66 
67 /*
68  * If boot-time tracing including tracers/events via kernel cmdline
69  * is running, we do not want to run SELFTEST.
70  */
71 bool __read_mostly tracing_selftest_disabled;
72 
73 void __init disable_tracing_selftest(const char *reason)
74 {
75 	if (!tracing_selftest_disabled) {
76 		tracing_selftest_disabled = true;
77 		pr_info("Ftrace startup test is disabled due to %s\n", reason);
78 	}
79 }
80 #else
81 #define tracing_selftest_running	0
82 #define tracing_selftest_disabled	0
83 #endif
84 
85 /* Pipe tracepoints to printk */
86 static struct trace_iterator *tracepoint_print_iter;
87 int tracepoint_printk;
88 static bool tracepoint_printk_stop_on_boot __initdata;
89 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
90 
91 /* For tracers that don't implement custom flags */
92 static struct tracer_opt dummy_tracer_opt[] = {
93 	{ }
94 };
95 
96 static int
97 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
98 {
99 	return 0;
100 }
101 
102 /*
103  * To prevent the comm cache from being overwritten when no
104  * tracing is active, only save the comm when a trace event
105  * occurred.
106  */
107 DEFINE_PER_CPU(bool, trace_taskinfo_save);
108 
109 /*
110  * Kill all tracing for good (never come back).
111  * It is initialized to 1 but will turn to zero if the initialization
112  * of the tracer is successful. But that is the only place that sets
113  * this back to zero.
114  */
115 static int tracing_disabled = 1;
116 
117 cpumask_var_t __read_mostly	tracing_buffer_mask;
118 
119 /*
120  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
121  *
122  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
123  * is set, then ftrace_dump is called. This will output the contents
124  * of the ftrace buffers to the console.  This is very useful for
125  * capturing traces that lead to crashes and outputing it to a
126  * serial console.
127  *
128  * It is default off, but you can enable it with either specifying
129  * "ftrace_dump_on_oops" in the kernel command line, or setting
130  * /proc/sys/kernel/ftrace_dump_on_oops
131  * Set 1 if you want to dump buffers of all CPUs
132  * Set 2 if you want to dump the buffer of the CPU that triggered oops
133  * Set instance name if you want to dump the specific trace instance
134  * Multiple instance dump is also supported, and instances are seperated
135  * by commas.
136  */
137 /* Set to string format zero to disable by default */
138 char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";
139 
140 /* When set, tracing will stop when a WARN*() is hit */
141 int __disable_trace_on_warning;
142 
143 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
144 /* Map of enums to their values, for "eval_map" file */
145 struct trace_eval_map_head {
146 	struct module			*mod;
147 	unsigned long			length;
148 };
149 
150 union trace_eval_map_item;
151 
152 struct trace_eval_map_tail {
153 	/*
154 	 * "end" is first and points to NULL as it must be different
155 	 * than "mod" or "eval_string"
156 	 */
157 	union trace_eval_map_item	*next;
158 	const char			*end;	/* points to NULL */
159 };
160 
161 static DEFINE_MUTEX(trace_eval_mutex);
162 
163 /*
164  * The trace_eval_maps are saved in an array with two extra elements,
165  * one at the beginning, and one at the end. The beginning item contains
166  * the count of the saved maps (head.length), and the module they
167  * belong to if not built in (head.mod). The ending item contains a
168  * pointer to the next array of saved eval_map items.
169  */
170 union trace_eval_map_item {
171 	struct trace_eval_map		map;
172 	struct trace_eval_map_head	head;
173 	struct trace_eval_map_tail	tail;
174 };
175 
176 static union trace_eval_map_item *trace_eval_maps;
177 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
178 
179 int tracing_set_tracer(struct trace_array *tr, const char *buf);
180 static void ftrace_trace_userstack(struct trace_array *tr,
181 				   struct trace_buffer *buffer,
182 				   unsigned int trace_ctx);
183 
184 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
185 static char *default_bootup_tracer;
186 
187 static bool allocate_snapshot;
188 static bool snapshot_at_boot;
189 
190 static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
191 static int boot_instance_index;
192 
193 static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
194 static int boot_snapshot_index;
195 
196 static int __init set_cmdline_ftrace(char *str)
197 {
198 	strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
199 	default_bootup_tracer = bootup_tracer_buf;
200 	/* We are using ftrace early, expand it */
201 	trace_set_ring_buffer_expanded(NULL);
202 	return 1;
203 }
204 __setup("ftrace=", set_cmdline_ftrace);
205 
206 int ftrace_dump_on_oops_enabled(void)
207 {
208 	if (!strcmp("0", ftrace_dump_on_oops))
209 		return 0;
210 	else
211 		return 1;
212 }
213 
214 static int __init set_ftrace_dump_on_oops(char *str)
215 {
216 	if (!*str) {
217 		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
218 		return 1;
219 	}
220 
221 	if (*str == ',') {
222 		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
223 		strscpy(ftrace_dump_on_oops + 1, str, MAX_TRACER_SIZE - 1);
224 		return 1;
225 	}
226 
227 	if (*str++ == '=') {
228 		strscpy(ftrace_dump_on_oops, str, MAX_TRACER_SIZE);
229 		return 1;
230 	}
231 
232 	return 0;
233 }
234 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
235 
236 static int __init stop_trace_on_warning(char *str)
237 {
238 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
239 		__disable_trace_on_warning = 1;
240 	return 1;
241 }
242 __setup("traceoff_on_warning", stop_trace_on_warning);
243 
244 static int __init boot_alloc_snapshot(char *str)
245 {
246 	char *slot = boot_snapshot_info + boot_snapshot_index;
247 	int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
248 	int ret;
249 
250 	if (str[0] == '=') {
251 		str++;
252 		if (strlen(str) >= left)
253 			return -1;
254 
255 		ret = snprintf(slot, left, "%s\t", str);
256 		boot_snapshot_index += ret;
257 	} else {
258 		allocate_snapshot = true;
259 		/* We also need the main ring buffer expanded */
260 		trace_set_ring_buffer_expanded(NULL);
261 	}
262 	return 1;
263 }
264 __setup("alloc_snapshot", boot_alloc_snapshot);
265 
266 
267 static int __init boot_snapshot(char *str)
268 {
269 	snapshot_at_boot = true;
270 	boot_alloc_snapshot(str);
271 	return 1;
272 }
273 __setup("ftrace_boot_snapshot", boot_snapshot);
274 
275 
276 static int __init boot_instance(char *str)
277 {
278 	char *slot = boot_instance_info + boot_instance_index;
279 	int left = sizeof(boot_instance_info) - boot_instance_index;
280 	int ret;
281 
282 	if (strlen(str) >= left)
283 		return -1;
284 
285 	ret = snprintf(slot, left, "%s\t", str);
286 	boot_instance_index += ret;
287 
288 	return 1;
289 }
290 __setup("trace_instance=", boot_instance);
291 
292 
293 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
294 
295 static int __init set_trace_boot_options(char *str)
296 {
297 	strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
298 	return 1;
299 }
300 __setup("trace_options=", set_trace_boot_options);
301 
302 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
303 static char *trace_boot_clock __initdata;
304 
305 static int __init set_trace_boot_clock(char *str)
306 {
307 	strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
308 	trace_boot_clock = trace_boot_clock_buf;
309 	return 1;
310 }
311 __setup("trace_clock=", set_trace_boot_clock);
312 
313 static int __init set_tracepoint_printk(char *str)
314 {
315 	/* Ignore the "tp_printk_stop_on_boot" param */
316 	if (*str == '_')
317 		return 0;
318 
319 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
320 		tracepoint_printk = 1;
321 	return 1;
322 }
323 __setup("tp_printk", set_tracepoint_printk);
324 
325 static int __init set_tracepoint_printk_stop(char *str)
326 {
327 	tracepoint_printk_stop_on_boot = true;
328 	return 1;
329 }
330 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
331 
332 unsigned long long ns2usecs(u64 nsec)
333 {
334 	nsec += 500;
335 	do_div(nsec, 1000);
336 	return nsec;
337 }
338 
339 static void
340 trace_process_export(struct trace_export *export,
341 	       struct ring_buffer_event *event, int flag)
342 {
343 	struct trace_entry *entry;
344 	unsigned int size = 0;
345 
346 	if (export->flags & flag) {
347 		entry = ring_buffer_event_data(event);
348 		size = ring_buffer_event_length(event);
349 		export->write(export, entry, size);
350 	}
351 }
352 
353 static DEFINE_MUTEX(ftrace_export_lock);
354 
355 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
356 
357 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
358 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
359 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
360 
361 static inline void ftrace_exports_enable(struct trace_export *export)
362 {
363 	if (export->flags & TRACE_EXPORT_FUNCTION)
364 		static_branch_inc(&trace_function_exports_enabled);
365 
366 	if (export->flags & TRACE_EXPORT_EVENT)
367 		static_branch_inc(&trace_event_exports_enabled);
368 
369 	if (export->flags & TRACE_EXPORT_MARKER)
370 		static_branch_inc(&trace_marker_exports_enabled);
371 }
372 
373 static inline void ftrace_exports_disable(struct trace_export *export)
374 {
375 	if (export->flags & TRACE_EXPORT_FUNCTION)
376 		static_branch_dec(&trace_function_exports_enabled);
377 
378 	if (export->flags & TRACE_EXPORT_EVENT)
379 		static_branch_dec(&trace_event_exports_enabled);
380 
381 	if (export->flags & TRACE_EXPORT_MARKER)
382 		static_branch_dec(&trace_marker_exports_enabled);
383 }
384 
385 static void ftrace_exports(struct ring_buffer_event *event, int flag)
386 {
387 	struct trace_export *export;
388 
389 	preempt_disable_notrace();
390 
391 	export = rcu_dereference_raw_check(ftrace_exports_list);
392 	while (export) {
393 		trace_process_export(export, event, flag);
394 		export = rcu_dereference_raw_check(export->next);
395 	}
396 
397 	preempt_enable_notrace();
398 }
399 
400 static inline void
401 add_trace_export(struct trace_export **list, struct trace_export *export)
402 {
403 	rcu_assign_pointer(export->next, *list);
404 	/*
405 	 * We are entering export into the list but another
406 	 * CPU might be walking that list. We need to make sure
407 	 * the export->next pointer is valid before another CPU sees
408 	 * the export pointer included into the list.
409 	 */
410 	rcu_assign_pointer(*list, export);
411 }
412 
413 static inline int
414 rm_trace_export(struct trace_export **list, struct trace_export *export)
415 {
416 	struct trace_export **p;
417 
418 	for (p = list; *p != NULL; p = &(*p)->next)
419 		if (*p == export)
420 			break;
421 
422 	if (*p != export)
423 		return -1;
424 
425 	rcu_assign_pointer(*p, (*p)->next);
426 
427 	return 0;
428 }
429 
430 static inline void
431 add_ftrace_export(struct trace_export **list, struct trace_export *export)
432 {
433 	ftrace_exports_enable(export);
434 
435 	add_trace_export(list, export);
436 }
437 
438 static inline int
439 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
440 {
441 	int ret;
442 
443 	ret = rm_trace_export(list, export);
444 	ftrace_exports_disable(export);
445 
446 	return ret;
447 }
448 
449 int register_ftrace_export(struct trace_export *export)
450 {
451 	if (WARN_ON_ONCE(!export->write))
452 		return -1;
453 
454 	mutex_lock(&ftrace_export_lock);
455 
456 	add_ftrace_export(&ftrace_exports_list, export);
457 
458 	mutex_unlock(&ftrace_export_lock);
459 
460 	return 0;
461 }
462 EXPORT_SYMBOL_GPL(register_ftrace_export);
463 
464 int unregister_ftrace_export(struct trace_export *export)
465 {
466 	int ret;
467 
468 	mutex_lock(&ftrace_export_lock);
469 
470 	ret = rm_ftrace_export(&ftrace_exports_list, export);
471 
472 	mutex_unlock(&ftrace_export_lock);
473 
474 	return ret;
475 }
476 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
477 
478 /* trace_flags holds trace_options default values */
479 #define TRACE_DEFAULT_FLAGS						\
480 	(FUNCTION_DEFAULT_FLAGS |					\
481 	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
482 	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
483 	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
484 	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
485 	 TRACE_ITER_HASH_PTR | TRACE_ITER_TRACE_PRINTK)
486 
487 /* trace_options that are only supported by global_trace */
488 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
489 	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
490 
491 /* trace_flags that are default zero for instances */
492 #define ZEROED_TRACE_FLAGS \
493 	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK | TRACE_ITER_TRACE_PRINTK)
494 
495 /*
496  * The global_trace is the descriptor that holds the top-level tracing
497  * buffers for the live tracing.
498  */
499 static struct trace_array global_trace = {
500 	.trace_flags = TRACE_DEFAULT_FLAGS,
501 };
502 
503 static struct trace_array *printk_trace = &global_trace;
504 
505 static __always_inline bool printk_binsafe(struct trace_array *tr)
506 {
507 	/*
508 	 * The binary format of traceprintk can cause a crash if used
509 	 * by a buffer from another boot. Force the use of the
510 	 * non binary version of trace_printk if the trace_printk
511 	 * buffer is a boot mapped ring buffer.
512 	 */
513 	return !(tr->flags & TRACE_ARRAY_FL_BOOT);
514 }
515 
516 static void update_printk_trace(struct trace_array *tr)
517 {
518 	if (printk_trace == tr)
519 		return;
520 
521 	printk_trace->trace_flags &= ~TRACE_ITER_TRACE_PRINTK;
522 	printk_trace = tr;
523 	tr->trace_flags |= TRACE_ITER_TRACE_PRINTK;
524 }
525 
526 void trace_set_ring_buffer_expanded(struct trace_array *tr)
527 {
528 	if (!tr)
529 		tr = &global_trace;
530 	tr->ring_buffer_expanded = true;
531 }
532 
533 LIST_HEAD(ftrace_trace_arrays);
534 
535 int trace_array_get(struct trace_array *this_tr)
536 {
537 	struct trace_array *tr;
538 	int ret = -ENODEV;
539 
540 	mutex_lock(&trace_types_lock);
541 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
542 		if (tr == this_tr) {
543 			tr->ref++;
544 			ret = 0;
545 			break;
546 		}
547 	}
548 	mutex_unlock(&trace_types_lock);
549 
550 	return ret;
551 }
552 
553 static void __trace_array_put(struct trace_array *this_tr)
554 {
555 	WARN_ON(!this_tr->ref);
556 	this_tr->ref--;
557 }
558 
559 /**
560  * trace_array_put - Decrement the reference counter for this trace array.
561  * @this_tr : pointer to the trace array
562  *
563  * NOTE: Use this when we no longer need the trace array returned by
564  * trace_array_get_by_name(). This ensures the trace array can be later
565  * destroyed.
566  *
567  */
568 void trace_array_put(struct trace_array *this_tr)
569 {
570 	if (!this_tr)
571 		return;
572 
573 	mutex_lock(&trace_types_lock);
574 	__trace_array_put(this_tr);
575 	mutex_unlock(&trace_types_lock);
576 }
577 EXPORT_SYMBOL_GPL(trace_array_put);
578 
579 int tracing_check_open_get_tr(struct trace_array *tr)
580 {
581 	int ret;
582 
583 	ret = security_locked_down(LOCKDOWN_TRACEFS);
584 	if (ret)
585 		return ret;
586 
587 	if (tracing_disabled)
588 		return -ENODEV;
589 
590 	if (tr && trace_array_get(tr) < 0)
591 		return -ENODEV;
592 
593 	return 0;
594 }
595 
596 /**
597  * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
598  * @filtered_pids: The list of pids to check
599  * @search_pid: The PID to find in @filtered_pids
600  *
601  * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
602  */
603 bool
604 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
605 {
606 	return trace_pid_list_is_set(filtered_pids, search_pid);
607 }
608 
609 /**
610  * trace_ignore_this_task - should a task be ignored for tracing
611  * @filtered_pids: The list of pids to check
612  * @filtered_no_pids: The list of pids not to be traced
613  * @task: The task that should be ignored if not filtered
614  *
615  * Checks if @task should be traced or not from @filtered_pids.
616  * Returns true if @task should *NOT* be traced.
617  * Returns false if @task should be traced.
618  */
619 bool
620 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
621 		       struct trace_pid_list *filtered_no_pids,
622 		       struct task_struct *task)
623 {
624 	/*
625 	 * If filtered_no_pids is not empty, and the task's pid is listed
626 	 * in filtered_no_pids, then return true.
627 	 * Otherwise, if filtered_pids is empty, that means we can
628 	 * trace all tasks. If it has content, then only trace pids
629 	 * within filtered_pids.
630 	 */
631 
632 	return (filtered_pids &&
633 		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
634 		(filtered_no_pids &&
635 		 trace_find_filtered_pid(filtered_no_pids, task->pid));
636 }
637 
638 /**
639  * trace_filter_add_remove_task - Add or remove a task from a pid_list
640  * @pid_list: The list to modify
641  * @self: The current task for fork or NULL for exit
642  * @task: The task to add or remove
643  *
644  * If adding a task, if @self is defined, the task is only added if @self
645  * is also included in @pid_list. This happens on fork and tasks should
646  * only be added when the parent is listed. If @self is NULL, then the
647  * @task pid will be removed from the list, which would happen on exit
648  * of a task.
649  */
650 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
651 				  struct task_struct *self,
652 				  struct task_struct *task)
653 {
654 	if (!pid_list)
655 		return;
656 
657 	/* For forks, we only add if the forking task is listed */
658 	if (self) {
659 		if (!trace_find_filtered_pid(pid_list, self->pid))
660 			return;
661 	}
662 
663 	/* "self" is set for forks, and NULL for exits */
664 	if (self)
665 		trace_pid_list_set(pid_list, task->pid);
666 	else
667 		trace_pid_list_clear(pid_list, task->pid);
668 }
669 
670 /**
671  * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
672  * @pid_list: The pid list to show
673  * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
674  * @pos: The position of the file
675  *
676  * This is used by the seq_file "next" operation to iterate the pids
677  * listed in a trace_pid_list structure.
678  *
679  * Returns the pid+1 as we want to display pid of zero, but NULL would
680  * stop the iteration.
681  */
682 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
683 {
684 	long pid = (unsigned long)v;
685 	unsigned int next;
686 
687 	(*pos)++;
688 
689 	/* pid already is +1 of the actual previous bit */
690 	if (trace_pid_list_next(pid_list, pid, &next) < 0)
691 		return NULL;
692 
693 	pid = next;
694 
695 	/* Return pid + 1 to allow zero to be represented */
696 	return (void *)(pid + 1);
697 }
698 
699 /**
700  * trace_pid_start - Used for seq_file to start reading pid lists
701  * @pid_list: The pid list to show
702  * @pos: The position of the file
703  *
704  * This is used by seq_file "start" operation to start the iteration
705  * of listing pids.
706  *
707  * Returns the pid+1 as we want to display pid of zero, but NULL would
708  * stop the iteration.
709  */
710 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
711 {
712 	unsigned long pid;
713 	unsigned int first;
714 	loff_t l = 0;
715 
716 	if (trace_pid_list_first(pid_list, &first) < 0)
717 		return NULL;
718 
719 	pid = first;
720 
721 	/* Return pid + 1 so that zero can be the exit value */
722 	for (pid++; pid && l < *pos;
723 	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
724 		;
725 	return (void *)pid;
726 }
727 
728 /**
729  * trace_pid_show - show the current pid in seq_file processing
730  * @m: The seq_file structure to write into
731  * @v: A void pointer of the pid (+1) value to display
732  *
733  * Can be directly used by seq_file operations to display the current
734  * pid value.
735  */
736 int trace_pid_show(struct seq_file *m, void *v)
737 {
738 	unsigned long pid = (unsigned long)v - 1;
739 
740 	seq_printf(m, "%lu\n", pid);
741 	return 0;
742 }
743 
744 /* 128 should be much more than enough */
745 #define PID_BUF_SIZE		127
746 
747 int trace_pid_write(struct trace_pid_list *filtered_pids,
748 		    struct trace_pid_list **new_pid_list,
749 		    const char __user *ubuf, size_t cnt)
750 {
751 	struct trace_pid_list *pid_list;
752 	struct trace_parser parser;
753 	unsigned long val;
754 	int nr_pids = 0;
755 	ssize_t read = 0;
756 	ssize_t ret;
757 	loff_t pos;
758 	pid_t pid;
759 
760 	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
761 		return -ENOMEM;
762 
763 	/*
764 	 * Always recreate a new array. The write is an all or nothing
765 	 * operation. Always create a new array when adding new pids by
766 	 * the user. If the operation fails, then the current list is
767 	 * not modified.
768 	 */
769 	pid_list = trace_pid_list_alloc();
770 	if (!pid_list) {
771 		trace_parser_put(&parser);
772 		return -ENOMEM;
773 	}
774 
775 	if (filtered_pids) {
776 		/* copy the current bits to the new max */
777 		ret = trace_pid_list_first(filtered_pids, &pid);
778 		while (!ret) {
779 			trace_pid_list_set(pid_list, pid);
780 			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
781 			nr_pids++;
782 		}
783 	}
784 
785 	ret = 0;
786 	while (cnt > 0) {
787 
788 		pos = 0;
789 
790 		ret = trace_get_user(&parser, ubuf, cnt, &pos);
791 		if (ret < 0)
792 			break;
793 
794 		read += ret;
795 		ubuf += ret;
796 		cnt -= ret;
797 
798 		if (!trace_parser_loaded(&parser))
799 			break;
800 
801 		ret = -EINVAL;
802 		if (kstrtoul(parser.buffer, 0, &val))
803 			break;
804 
805 		pid = (pid_t)val;
806 
807 		if (trace_pid_list_set(pid_list, pid) < 0) {
808 			ret = -1;
809 			break;
810 		}
811 		nr_pids++;
812 
813 		trace_parser_clear(&parser);
814 		ret = 0;
815 	}
816 	trace_parser_put(&parser);
817 
818 	if (ret < 0) {
819 		trace_pid_list_free(pid_list);
820 		return ret;
821 	}
822 
823 	if (!nr_pids) {
824 		/* Cleared the list of pids */
825 		trace_pid_list_free(pid_list);
826 		pid_list = NULL;
827 	}
828 
829 	*new_pid_list = pid_list;
830 
831 	return read;
832 }
833 
834 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
835 {
836 	u64 ts;
837 
838 	/* Early boot up does not have a buffer yet */
839 	if (!buf->buffer)
840 		return trace_clock_local();
841 
842 	ts = ring_buffer_time_stamp(buf->buffer);
843 	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
844 
845 	return ts;
846 }
847 
848 u64 ftrace_now(int cpu)
849 {
850 	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
851 }
852 
853 /**
854  * tracing_is_enabled - Show if global_trace has been enabled
855  *
856  * Shows if the global trace has been enabled or not. It uses the
857  * mirror flag "buffer_disabled" to be used in fast paths such as for
858  * the irqsoff tracer. But it may be inaccurate due to races. If you
859  * need to know the accurate state, use tracing_is_on() which is a little
860  * slower, but accurate.
861  */
862 int tracing_is_enabled(void)
863 {
864 	/*
865 	 * For quick access (irqsoff uses this in fast path), just
866 	 * return the mirror variable of the state of the ring buffer.
867 	 * It's a little racy, but we don't really care.
868 	 */
869 	smp_rmb();
870 	return !global_trace.buffer_disabled;
871 }
872 
873 /*
874  * trace_buf_size is the size in bytes that is allocated
875  * for a buffer. Note, the number of bytes is always rounded
876  * to page size.
877  *
878  * This number is purposely set to a low number of 16384.
879  * If the dump on oops happens, it will be much appreciated
880  * to not have to wait for all that output. Anyway this can be
881  * boot time and run time configurable.
882  */
883 #define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
884 
885 static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
886 
887 /* trace_types holds a link list of available tracers. */
888 static struct tracer		*trace_types __read_mostly;
889 
890 /*
891  * trace_types_lock is used to protect the trace_types list.
892  */
893 DEFINE_MUTEX(trace_types_lock);
894 
895 /*
896  * serialize the access of the ring buffer
897  *
898  * ring buffer serializes readers, but it is low level protection.
899  * The validity of the events (which returns by ring_buffer_peek() ..etc)
900  * are not protected by ring buffer.
901  *
902  * The content of events may become garbage if we allow other process consumes
903  * these events concurrently:
904  *   A) the page of the consumed events may become a normal page
905  *      (not reader page) in ring buffer, and this page will be rewritten
906  *      by events producer.
907  *   B) The page of the consumed events may become a page for splice_read,
908  *      and this page will be returned to system.
909  *
910  * These primitives allow multi process access to different cpu ring buffer
911  * concurrently.
912  *
913  * These primitives don't distinguish read-only and read-consume access.
914  * Multi read-only access are also serialized.
915  */
916 
917 #ifdef CONFIG_SMP
918 static DECLARE_RWSEM(all_cpu_access_lock);
919 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
920 
921 static inline void trace_access_lock(int cpu)
922 {
923 	if (cpu == RING_BUFFER_ALL_CPUS) {
924 		/* gain it for accessing the whole ring buffer. */
925 		down_write(&all_cpu_access_lock);
926 	} else {
927 		/* gain it for accessing a cpu ring buffer. */
928 
929 		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
930 		down_read(&all_cpu_access_lock);
931 
932 		/* Secondly block other access to this @cpu ring buffer. */
933 		mutex_lock(&per_cpu(cpu_access_lock, cpu));
934 	}
935 }
936 
937 static inline void trace_access_unlock(int cpu)
938 {
939 	if (cpu == RING_BUFFER_ALL_CPUS) {
940 		up_write(&all_cpu_access_lock);
941 	} else {
942 		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
943 		up_read(&all_cpu_access_lock);
944 	}
945 }
946 
947 static inline void trace_access_lock_init(void)
948 {
949 	int cpu;
950 
951 	for_each_possible_cpu(cpu)
952 		mutex_init(&per_cpu(cpu_access_lock, cpu));
953 }
954 
955 #else
956 
957 static DEFINE_MUTEX(access_lock);
958 
959 static inline void trace_access_lock(int cpu)
960 {
961 	(void)cpu;
962 	mutex_lock(&access_lock);
963 }
964 
965 static inline void trace_access_unlock(int cpu)
966 {
967 	(void)cpu;
968 	mutex_unlock(&access_lock);
969 }
970 
971 static inline void trace_access_lock_init(void)
972 {
973 }
974 
975 #endif
976 
977 #ifdef CONFIG_STACKTRACE
978 static void __ftrace_trace_stack(struct trace_buffer *buffer,
979 				 unsigned int trace_ctx,
980 				 int skip, struct pt_regs *regs);
981 static inline void ftrace_trace_stack(struct trace_array *tr,
982 				      struct trace_buffer *buffer,
983 				      unsigned int trace_ctx,
984 				      int skip, struct pt_regs *regs);
985 
986 #else
987 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
988 					unsigned int trace_ctx,
989 					int skip, struct pt_regs *regs)
990 {
991 }
992 static inline void ftrace_trace_stack(struct trace_array *tr,
993 				      struct trace_buffer *buffer,
994 				      unsigned long trace_ctx,
995 				      int skip, struct pt_regs *regs)
996 {
997 }
998 
999 #endif
1000 
1001 static __always_inline void
1002 trace_event_setup(struct ring_buffer_event *event,
1003 		  int type, unsigned int trace_ctx)
1004 {
1005 	struct trace_entry *ent = ring_buffer_event_data(event);
1006 
1007 	tracing_generic_entry_update(ent, type, trace_ctx);
1008 }
1009 
1010 static __always_inline struct ring_buffer_event *
1011 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
1012 			  int type,
1013 			  unsigned long len,
1014 			  unsigned int trace_ctx)
1015 {
1016 	struct ring_buffer_event *event;
1017 
1018 	event = ring_buffer_lock_reserve(buffer, len);
1019 	if (event != NULL)
1020 		trace_event_setup(event, type, trace_ctx);
1021 
1022 	return event;
1023 }
1024 
1025 void tracer_tracing_on(struct trace_array *tr)
1026 {
1027 	if (tr->array_buffer.buffer)
1028 		ring_buffer_record_on(tr->array_buffer.buffer);
1029 	/*
1030 	 * This flag is looked at when buffers haven't been allocated
1031 	 * yet, or by some tracers (like irqsoff), that just want to
1032 	 * know if the ring buffer has been disabled, but it can handle
1033 	 * races of where it gets disabled but we still do a record.
1034 	 * As the check is in the fast path of the tracers, it is more
1035 	 * important to be fast than accurate.
1036 	 */
1037 	tr->buffer_disabled = 0;
1038 	/* Make the flag seen by readers */
1039 	smp_wmb();
1040 }
1041 
1042 /**
1043  * tracing_on - enable tracing buffers
1044  *
1045  * This function enables tracing buffers that may have been
1046  * disabled with tracing_off.
1047  */
1048 void tracing_on(void)
1049 {
1050 	tracer_tracing_on(&global_trace);
1051 }
1052 EXPORT_SYMBOL_GPL(tracing_on);
1053 
1054 
1055 static __always_inline void
1056 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
1057 {
1058 	__this_cpu_write(trace_taskinfo_save, true);
1059 
1060 	/* If this is the temp buffer, we need to commit fully */
1061 	if (this_cpu_read(trace_buffered_event) == event) {
1062 		/* Length is in event->array[0] */
1063 		ring_buffer_write(buffer, event->array[0], &event->array[1]);
1064 		/* Release the temp buffer */
1065 		this_cpu_dec(trace_buffered_event_cnt);
1066 		/* ring_buffer_unlock_commit() enables preemption */
1067 		preempt_enable_notrace();
1068 	} else
1069 		ring_buffer_unlock_commit(buffer);
1070 }
1071 
1072 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1073 		       const char *str, int size)
1074 {
1075 	struct ring_buffer_event *event;
1076 	struct trace_buffer *buffer;
1077 	struct print_entry *entry;
1078 	unsigned int trace_ctx;
1079 	int alloc;
1080 
1081 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1082 		return 0;
1083 
1084 	if (unlikely(tracing_selftest_running && tr == &global_trace))
1085 		return 0;
1086 
1087 	if (unlikely(tracing_disabled))
1088 		return 0;
1089 
1090 	alloc = sizeof(*entry) + size + 2; /* possible \n added */
1091 
1092 	trace_ctx = tracing_gen_ctx();
1093 	buffer = tr->array_buffer.buffer;
1094 	ring_buffer_nest_start(buffer);
1095 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1096 					    trace_ctx);
1097 	if (!event) {
1098 		size = 0;
1099 		goto out;
1100 	}
1101 
1102 	entry = ring_buffer_event_data(event);
1103 	entry->ip = ip;
1104 
1105 	memcpy(&entry->buf, str, size);
1106 
1107 	/* Add a newline if necessary */
1108 	if (entry->buf[size - 1] != '\n') {
1109 		entry->buf[size] = '\n';
1110 		entry->buf[size + 1] = '\0';
1111 	} else
1112 		entry->buf[size] = '\0';
1113 
1114 	__buffer_unlock_commit(buffer, event);
1115 	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1116  out:
1117 	ring_buffer_nest_end(buffer);
1118 	return size;
1119 }
1120 EXPORT_SYMBOL_GPL(__trace_array_puts);
1121 
1122 /**
1123  * __trace_puts - write a constant string into the trace buffer.
1124  * @ip:	   The address of the caller
1125  * @str:   The constant string to write
1126  * @size:  The size of the string.
1127  */
1128 int __trace_puts(unsigned long ip, const char *str, int size)
1129 {
1130 	return __trace_array_puts(printk_trace, ip, str, size);
1131 }
1132 EXPORT_SYMBOL_GPL(__trace_puts);
1133 
1134 /**
1135  * __trace_bputs - write the pointer to a constant string into trace buffer
1136  * @ip:	   The address of the caller
1137  * @str:   The constant string to write to the buffer to
1138  */
1139 int __trace_bputs(unsigned long ip, const char *str)
1140 {
1141 	struct trace_array *tr = READ_ONCE(printk_trace);
1142 	struct ring_buffer_event *event;
1143 	struct trace_buffer *buffer;
1144 	struct bputs_entry *entry;
1145 	unsigned int trace_ctx;
1146 	int size = sizeof(struct bputs_entry);
1147 	int ret = 0;
1148 
1149 	if (!printk_binsafe(tr))
1150 		return __trace_puts(ip, str, strlen(str));
1151 
1152 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1153 		return 0;
1154 
1155 	if (unlikely(tracing_selftest_running || tracing_disabled))
1156 		return 0;
1157 
1158 	trace_ctx = tracing_gen_ctx();
1159 	buffer = tr->array_buffer.buffer;
1160 
1161 	ring_buffer_nest_start(buffer);
1162 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1163 					    trace_ctx);
1164 	if (!event)
1165 		goto out;
1166 
1167 	entry = ring_buffer_event_data(event);
1168 	entry->ip			= ip;
1169 	entry->str			= str;
1170 
1171 	__buffer_unlock_commit(buffer, event);
1172 	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1173 
1174 	ret = 1;
1175  out:
1176 	ring_buffer_nest_end(buffer);
1177 	return ret;
1178 }
1179 EXPORT_SYMBOL_GPL(__trace_bputs);
1180 
1181 #ifdef CONFIG_TRACER_SNAPSHOT
1182 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1183 					   void *cond_data)
1184 {
1185 	struct tracer *tracer = tr->current_trace;
1186 	unsigned long flags;
1187 
1188 	if (in_nmi()) {
1189 		trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1190 		trace_array_puts(tr, "*** snapshot is being ignored        ***\n");
1191 		return;
1192 	}
1193 
1194 	if (!tr->allocated_snapshot) {
1195 		trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1196 		trace_array_puts(tr, "*** stopping trace here!   ***\n");
1197 		tracer_tracing_off(tr);
1198 		return;
1199 	}
1200 
1201 	/* Note, snapshot can not be used when the tracer uses it */
1202 	if (tracer->use_max_tr) {
1203 		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1204 		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1205 		return;
1206 	}
1207 
1208 	if (tr->mapped) {
1209 		trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
1210 		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1211 		return;
1212 	}
1213 
1214 	local_irq_save(flags);
1215 	update_max_tr(tr, current, smp_processor_id(), cond_data);
1216 	local_irq_restore(flags);
1217 }
1218 
1219 void tracing_snapshot_instance(struct trace_array *tr)
1220 {
1221 	tracing_snapshot_instance_cond(tr, NULL);
1222 }
1223 
1224 /**
1225  * tracing_snapshot - take a snapshot of the current buffer.
1226  *
1227  * This causes a swap between the snapshot buffer and the current live
1228  * tracing buffer. You can use this to take snapshots of the live
1229  * trace when some condition is triggered, but continue to trace.
1230  *
1231  * Note, make sure to allocate the snapshot with either
1232  * a tracing_snapshot_alloc(), or by doing it manually
1233  * with: echo 1 > /sys/kernel/tracing/snapshot
1234  *
1235  * If the snapshot buffer is not allocated, it will stop tracing.
1236  * Basically making a permanent snapshot.
1237  */
1238 void tracing_snapshot(void)
1239 {
1240 	struct trace_array *tr = &global_trace;
1241 
1242 	tracing_snapshot_instance(tr);
1243 }
1244 EXPORT_SYMBOL_GPL(tracing_snapshot);
1245 
1246 /**
1247  * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1248  * @tr:		The tracing instance to snapshot
1249  * @cond_data:	The data to be tested conditionally, and possibly saved
1250  *
1251  * This is the same as tracing_snapshot() except that the snapshot is
1252  * conditional - the snapshot will only happen if the
1253  * cond_snapshot.update() implementation receiving the cond_data
1254  * returns true, which means that the trace array's cond_snapshot
1255  * update() operation used the cond_data to determine whether the
1256  * snapshot should be taken, and if it was, presumably saved it along
1257  * with the snapshot.
1258  */
1259 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1260 {
1261 	tracing_snapshot_instance_cond(tr, cond_data);
1262 }
1263 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1264 
1265 /**
1266  * tracing_cond_snapshot_data - get the user data associated with a snapshot
1267  * @tr:		The tracing instance
1268  *
1269  * When the user enables a conditional snapshot using
1270  * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1271  * with the snapshot.  This accessor is used to retrieve it.
1272  *
1273  * Should not be called from cond_snapshot.update(), since it takes
1274  * the tr->max_lock lock, which the code calling
1275  * cond_snapshot.update() has already done.
1276  *
1277  * Returns the cond_data associated with the trace array's snapshot.
1278  */
1279 void *tracing_cond_snapshot_data(struct trace_array *tr)
1280 {
1281 	void *cond_data = NULL;
1282 
1283 	local_irq_disable();
1284 	arch_spin_lock(&tr->max_lock);
1285 
1286 	if (tr->cond_snapshot)
1287 		cond_data = tr->cond_snapshot->cond_data;
1288 
1289 	arch_spin_unlock(&tr->max_lock);
1290 	local_irq_enable();
1291 
1292 	return cond_data;
1293 }
1294 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1295 
1296 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1297 					struct array_buffer *size_buf, int cpu_id);
1298 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1299 
1300 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1301 {
1302 	int order;
1303 	int ret;
1304 
1305 	if (!tr->allocated_snapshot) {
1306 
1307 		/* Make the snapshot buffer have the same order as main buffer */
1308 		order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
1309 		ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
1310 		if (ret < 0)
1311 			return ret;
1312 
1313 		/* allocate spare buffer */
1314 		ret = resize_buffer_duplicate_size(&tr->max_buffer,
1315 				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1316 		if (ret < 0)
1317 			return ret;
1318 
1319 		tr->allocated_snapshot = true;
1320 	}
1321 
1322 	return 0;
1323 }
1324 
1325 static void free_snapshot(struct trace_array *tr)
1326 {
1327 	/*
1328 	 * We don't free the ring buffer. instead, resize it because
1329 	 * The max_tr ring buffer has some state (e.g. ring->clock) and
1330 	 * we want preserve it.
1331 	 */
1332 	ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
1333 	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1334 	set_buffer_entries(&tr->max_buffer, 1);
1335 	tracing_reset_online_cpus(&tr->max_buffer);
1336 	tr->allocated_snapshot = false;
1337 }
1338 
1339 static int tracing_arm_snapshot_locked(struct trace_array *tr)
1340 {
1341 	int ret;
1342 
1343 	lockdep_assert_held(&trace_types_lock);
1344 
1345 	spin_lock(&tr->snapshot_trigger_lock);
1346 	if (tr->snapshot == UINT_MAX || tr->mapped) {
1347 		spin_unlock(&tr->snapshot_trigger_lock);
1348 		return -EBUSY;
1349 	}
1350 
1351 	tr->snapshot++;
1352 	spin_unlock(&tr->snapshot_trigger_lock);
1353 
1354 	ret = tracing_alloc_snapshot_instance(tr);
1355 	if (ret) {
1356 		spin_lock(&tr->snapshot_trigger_lock);
1357 		tr->snapshot--;
1358 		spin_unlock(&tr->snapshot_trigger_lock);
1359 	}
1360 
1361 	return ret;
1362 }
1363 
1364 int tracing_arm_snapshot(struct trace_array *tr)
1365 {
1366 	int ret;
1367 
1368 	mutex_lock(&trace_types_lock);
1369 	ret = tracing_arm_snapshot_locked(tr);
1370 	mutex_unlock(&trace_types_lock);
1371 
1372 	return ret;
1373 }
1374 
1375 void tracing_disarm_snapshot(struct trace_array *tr)
1376 {
1377 	spin_lock(&tr->snapshot_trigger_lock);
1378 	if (!WARN_ON(!tr->snapshot))
1379 		tr->snapshot--;
1380 	spin_unlock(&tr->snapshot_trigger_lock);
1381 }
1382 
1383 /**
1384  * tracing_alloc_snapshot - allocate snapshot buffer.
1385  *
1386  * This only allocates the snapshot buffer if it isn't already
1387  * allocated - it doesn't also take a snapshot.
1388  *
1389  * This is meant to be used in cases where the snapshot buffer needs
1390  * to be set up for events that can't sleep but need to be able to
1391  * trigger a snapshot.
1392  */
1393 int tracing_alloc_snapshot(void)
1394 {
1395 	struct trace_array *tr = &global_trace;
1396 	int ret;
1397 
1398 	ret = tracing_alloc_snapshot_instance(tr);
1399 	WARN_ON(ret < 0);
1400 
1401 	return ret;
1402 }
1403 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1404 
1405 /**
1406  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1407  *
1408  * This is similar to tracing_snapshot(), but it will allocate the
1409  * snapshot buffer if it isn't already allocated. Use this only
1410  * where it is safe to sleep, as the allocation may sleep.
1411  *
1412  * This causes a swap between the snapshot buffer and the current live
1413  * tracing buffer. You can use this to take snapshots of the live
1414  * trace when some condition is triggered, but continue to trace.
1415  */
1416 void tracing_snapshot_alloc(void)
1417 {
1418 	int ret;
1419 
1420 	ret = tracing_alloc_snapshot();
1421 	if (ret < 0)
1422 		return;
1423 
1424 	tracing_snapshot();
1425 }
1426 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1427 
1428 /**
1429  * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1430  * @tr:		The tracing instance
1431  * @cond_data:	User data to associate with the snapshot
1432  * @update:	Implementation of the cond_snapshot update function
1433  *
1434  * Check whether the conditional snapshot for the given instance has
1435  * already been enabled, or if the current tracer is already using a
1436  * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1437  * save the cond_data and update function inside.
1438  *
1439  * Returns 0 if successful, error otherwise.
1440  */
1441 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1442 				 cond_update_fn_t update)
1443 {
1444 	struct cond_snapshot *cond_snapshot;
1445 	int ret = 0;
1446 
1447 	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1448 	if (!cond_snapshot)
1449 		return -ENOMEM;
1450 
1451 	cond_snapshot->cond_data = cond_data;
1452 	cond_snapshot->update = update;
1453 
1454 	mutex_lock(&trace_types_lock);
1455 
1456 	if (tr->current_trace->use_max_tr) {
1457 		ret = -EBUSY;
1458 		goto fail_unlock;
1459 	}
1460 
1461 	/*
1462 	 * The cond_snapshot can only change to NULL without the
1463 	 * trace_types_lock. We don't care if we race with it going
1464 	 * to NULL, but we want to make sure that it's not set to
1465 	 * something other than NULL when we get here, which we can
1466 	 * do safely with only holding the trace_types_lock and not
1467 	 * having to take the max_lock.
1468 	 */
1469 	if (tr->cond_snapshot) {
1470 		ret = -EBUSY;
1471 		goto fail_unlock;
1472 	}
1473 
1474 	ret = tracing_arm_snapshot_locked(tr);
1475 	if (ret)
1476 		goto fail_unlock;
1477 
1478 	local_irq_disable();
1479 	arch_spin_lock(&tr->max_lock);
1480 	tr->cond_snapshot = cond_snapshot;
1481 	arch_spin_unlock(&tr->max_lock);
1482 	local_irq_enable();
1483 
1484 	mutex_unlock(&trace_types_lock);
1485 
1486 	return ret;
1487 
1488  fail_unlock:
1489 	mutex_unlock(&trace_types_lock);
1490 	kfree(cond_snapshot);
1491 	return ret;
1492 }
1493 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1494 
1495 /**
1496  * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1497  * @tr:		The tracing instance
1498  *
1499  * Check whether the conditional snapshot for the given instance is
1500  * enabled; if so, free the cond_snapshot associated with it,
1501  * otherwise return -EINVAL.
1502  *
1503  * Returns 0 if successful, error otherwise.
1504  */
1505 int tracing_snapshot_cond_disable(struct trace_array *tr)
1506 {
1507 	int ret = 0;
1508 
1509 	local_irq_disable();
1510 	arch_spin_lock(&tr->max_lock);
1511 
1512 	if (!tr->cond_snapshot)
1513 		ret = -EINVAL;
1514 	else {
1515 		kfree(tr->cond_snapshot);
1516 		tr->cond_snapshot = NULL;
1517 	}
1518 
1519 	arch_spin_unlock(&tr->max_lock);
1520 	local_irq_enable();
1521 
1522 	tracing_disarm_snapshot(tr);
1523 
1524 	return ret;
1525 }
1526 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1527 #else
1528 void tracing_snapshot(void)
1529 {
1530 	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1531 }
1532 EXPORT_SYMBOL_GPL(tracing_snapshot);
1533 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1534 {
1535 	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1536 }
1537 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1538 int tracing_alloc_snapshot(void)
1539 {
1540 	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1541 	return -ENODEV;
1542 }
1543 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1544 void tracing_snapshot_alloc(void)
1545 {
1546 	/* Give warning */
1547 	tracing_snapshot();
1548 }
1549 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1550 void *tracing_cond_snapshot_data(struct trace_array *tr)
1551 {
1552 	return NULL;
1553 }
1554 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1555 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1556 {
1557 	return -ENODEV;
1558 }
1559 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1560 int tracing_snapshot_cond_disable(struct trace_array *tr)
1561 {
1562 	return false;
1563 }
1564 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1565 #define free_snapshot(tr)	do { } while (0)
1566 #define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
1567 #endif /* CONFIG_TRACER_SNAPSHOT */
1568 
1569 void tracer_tracing_off(struct trace_array *tr)
1570 {
1571 	if (tr->array_buffer.buffer)
1572 		ring_buffer_record_off(tr->array_buffer.buffer);
1573 	/*
1574 	 * This flag is looked at when buffers haven't been allocated
1575 	 * yet, or by some tracers (like irqsoff), that just want to
1576 	 * know if the ring buffer has been disabled, but it can handle
1577 	 * races of where it gets disabled but we still do a record.
1578 	 * As the check is in the fast path of the tracers, it is more
1579 	 * important to be fast than accurate.
1580 	 */
1581 	tr->buffer_disabled = 1;
1582 	/* Make the flag seen by readers */
1583 	smp_wmb();
1584 }
1585 
1586 /**
1587  * tracing_off - turn off tracing buffers
1588  *
1589  * This function stops the tracing buffers from recording data.
1590  * It does not disable any overhead the tracers themselves may
1591  * be causing. This function simply causes all recording to
1592  * the ring buffers to fail.
1593  */
1594 void tracing_off(void)
1595 {
1596 	tracer_tracing_off(&global_trace);
1597 }
1598 EXPORT_SYMBOL_GPL(tracing_off);
1599 
1600 void disable_trace_on_warning(void)
1601 {
1602 	if (__disable_trace_on_warning) {
1603 		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1604 			"Disabling tracing due to warning\n");
1605 		tracing_off();
1606 	}
1607 }
1608 
1609 /**
1610  * tracer_tracing_is_on - show real state of ring buffer enabled
1611  * @tr : the trace array to know if ring buffer is enabled
1612  *
1613  * Shows real state of the ring buffer if it is enabled or not.
1614  */
1615 bool tracer_tracing_is_on(struct trace_array *tr)
1616 {
1617 	if (tr->array_buffer.buffer)
1618 		return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
1619 	return !tr->buffer_disabled;
1620 }
1621 
1622 /**
1623  * tracing_is_on - show state of ring buffers enabled
1624  */
1625 int tracing_is_on(void)
1626 {
1627 	return tracer_tracing_is_on(&global_trace);
1628 }
1629 EXPORT_SYMBOL_GPL(tracing_is_on);
1630 
1631 static int __init set_buf_size(char *str)
1632 {
1633 	unsigned long buf_size;
1634 
1635 	if (!str)
1636 		return 0;
1637 	buf_size = memparse(str, &str);
1638 	/*
1639 	 * nr_entries can not be zero and the startup
1640 	 * tests require some buffer space. Therefore
1641 	 * ensure we have at least 4096 bytes of buffer.
1642 	 */
1643 	trace_buf_size = max(4096UL, buf_size);
1644 	return 1;
1645 }
1646 __setup("trace_buf_size=", set_buf_size);
1647 
1648 static int __init set_tracing_thresh(char *str)
1649 {
1650 	unsigned long threshold;
1651 	int ret;
1652 
1653 	if (!str)
1654 		return 0;
1655 	ret = kstrtoul(str, 0, &threshold);
1656 	if (ret < 0)
1657 		return 0;
1658 	tracing_thresh = threshold * 1000;
1659 	return 1;
1660 }
1661 __setup("tracing_thresh=", set_tracing_thresh);
1662 
1663 unsigned long nsecs_to_usecs(unsigned long nsecs)
1664 {
1665 	return nsecs / 1000;
1666 }
1667 
1668 /*
1669  * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1670  * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1671  * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1672  * of strings in the order that the evals (enum) were defined.
1673  */
1674 #undef C
1675 #define C(a, b) b
1676 
1677 /* These must match the bit positions in trace_iterator_flags */
1678 static const char *trace_options[] = {
1679 	TRACE_FLAGS
1680 	NULL
1681 };
1682 
1683 static struct {
1684 	u64 (*func)(void);
1685 	const char *name;
1686 	int in_ns;		/* is this clock in nanoseconds? */
1687 } trace_clocks[] = {
1688 	{ trace_clock_local,		"local",	1 },
1689 	{ trace_clock_global,		"global",	1 },
1690 	{ trace_clock_counter,		"counter",	0 },
1691 	{ trace_clock_jiffies,		"uptime",	0 },
1692 	{ trace_clock,			"perf",		1 },
1693 	{ ktime_get_mono_fast_ns,	"mono",		1 },
1694 	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
1695 	{ ktime_get_boot_fast_ns,	"boot",		1 },
1696 	{ ktime_get_tai_fast_ns,	"tai",		1 },
1697 	ARCH_TRACE_CLOCKS
1698 };
1699 
1700 bool trace_clock_in_ns(struct trace_array *tr)
1701 {
1702 	if (trace_clocks[tr->clock_id].in_ns)
1703 		return true;
1704 
1705 	return false;
1706 }
1707 
1708 /*
1709  * trace_parser_get_init - gets the buffer for trace parser
1710  */
1711 int trace_parser_get_init(struct trace_parser *parser, int size)
1712 {
1713 	memset(parser, 0, sizeof(*parser));
1714 
1715 	parser->buffer = kmalloc(size, GFP_KERNEL);
1716 	if (!parser->buffer)
1717 		return 1;
1718 
1719 	parser->size = size;
1720 	return 0;
1721 }
1722 
1723 /*
1724  * trace_parser_put - frees the buffer for trace parser
1725  */
1726 void trace_parser_put(struct trace_parser *parser)
1727 {
1728 	kfree(parser->buffer);
1729 	parser->buffer = NULL;
1730 }
1731 
1732 /*
1733  * trace_get_user - reads the user input string separated by  space
1734  * (matched by isspace(ch))
1735  *
1736  * For each string found the 'struct trace_parser' is updated,
1737  * and the function returns.
1738  *
1739  * Returns number of bytes read.
1740  *
1741  * See kernel/trace/trace.h for 'struct trace_parser' details.
1742  */
1743 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1744 	size_t cnt, loff_t *ppos)
1745 {
1746 	char ch;
1747 	size_t read = 0;
1748 	ssize_t ret;
1749 
1750 	if (!*ppos)
1751 		trace_parser_clear(parser);
1752 
1753 	ret = get_user(ch, ubuf++);
1754 	if (ret)
1755 		goto out;
1756 
1757 	read++;
1758 	cnt--;
1759 
1760 	/*
1761 	 * The parser is not finished with the last write,
1762 	 * continue reading the user input without skipping spaces.
1763 	 */
1764 	if (!parser->cont) {
1765 		/* skip white space */
1766 		while (cnt && isspace(ch)) {
1767 			ret = get_user(ch, ubuf++);
1768 			if (ret)
1769 				goto out;
1770 			read++;
1771 			cnt--;
1772 		}
1773 
1774 		parser->idx = 0;
1775 
1776 		/* only spaces were written */
1777 		if (isspace(ch) || !ch) {
1778 			*ppos += read;
1779 			ret = read;
1780 			goto out;
1781 		}
1782 	}
1783 
1784 	/* read the non-space input */
1785 	while (cnt && !isspace(ch) && ch) {
1786 		if (parser->idx < parser->size - 1)
1787 			parser->buffer[parser->idx++] = ch;
1788 		else {
1789 			ret = -EINVAL;
1790 			goto out;
1791 		}
1792 		ret = get_user(ch, ubuf++);
1793 		if (ret)
1794 			goto out;
1795 		read++;
1796 		cnt--;
1797 	}
1798 
1799 	/* We either got finished input or we have to wait for another call. */
1800 	if (isspace(ch) || !ch) {
1801 		parser->buffer[parser->idx] = 0;
1802 		parser->cont = false;
1803 	} else if (parser->idx < parser->size - 1) {
1804 		parser->cont = true;
1805 		parser->buffer[parser->idx++] = ch;
1806 		/* Make sure the parsed string always terminates with '\0'. */
1807 		parser->buffer[parser->idx] = 0;
1808 	} else {
1809 		ret = -EINVAL;
1810 		goto out;
1811 	}
1812 
1813 	*ppos += read;
1814 	ret = read;
1815 
1816 out:
1817 	return ret;
1818 }
1819 
1820 /* TODO add a seq_buf_to_buffer() */
1821 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1822 {
1823 	int len;
1824 
1825 	if (trace_seq_used(s) <= s->readpos)
1826 		return -EBUSY;
1827 
1828 	len = trace_seq_used(s) - s->readpos;
1829 	if (cnt > len)
1830 		cnt = len;
1831 	memcpy(buf, s->buffer + s->readpos, cnt);
1832 
1833 	s->readpos += cnt;
1834 	return cnt;
1835 }
1836 
1837 unsigned long __read_mostly	tracing_thresh;
1838 
1839 #ifdef CONFIG_TRACER_MAX_TRACE
1840 static const struct file_operations tracing_max_lat_fops;
1841 
1842 #ifdef LATENCY_FS_NOTIFY
1843 
1844 static struct workqueue_struct *fsnotify_wq;
1845 
1846 static void latency_fsnotify_workfn(struct work_struct *work)
1847 {
1848 	struct trace_array *tr = container_of(work, struct trace_array,
1849 					      fsnotify_work);
1850 	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1851 }
1852 
1853 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1854 {
1855 	struct trace_array *tr = container_of(iwork, struct trace_array,
1856 					      fsnotify_irqwork);
1857 	queue_work(fsnotify_wq, &tr->fsnotify_work);
1858 }
1859 
1860 static void trace_create_maxlat_file(struct trace_array *tr,
1861 				     struct dentry *d_tracer)
1862 {
1863 	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1864 	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1865 	tr->d_max_latency = trace_create_file("tracing_max_latency",
1866 					      TRACE_MODE_WRITE,
1867 					      d_tracer, tr,
1868 					      &tracing_max_lat_fops);
1869 }
1870 
1871 __init static int latency_fsnotify_init(void)
1872 {
1873 	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1874 				      WQ_UNBOUND | WQ_HIGHPRI, 0);
1875 	if (!fsnotify_wq) {
1876 		pr_err("Unable to allocate tr_max_lat_wq\n");
1877 		return -ENOMEM;
1878 	}
1879 	return 0;
1880 }
1881 
1882 late_initcall_sync(latency_fsnotify_init);
1883 
1884 void latency_fsnotify(struct trace_array *tr)
1885 {
1886 	if (!fsnotify_wq)
1887 		return;
1888 	/*
1889 	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1890 	 * possible that we are called from __schedule() or do_idle(), which
1891 	 * could cause a deadlock.
1892 	 */
1893 	irq_work_queue(&tr->fsnotify_irqwork);
1894 }
1895 
1896 #else /* !LATENCY_FS_NOTIFY */
1897 
1898 #define trace_create_maxlat_file(tr, d_tracer)				\
1899 	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
1900 			  d_tracer, tr, &tracing_max_lat_fops)
1901 
1902 #endif
1903 
1904 /*
1905  * Copy the new maximum trace into the separate maximum-trace
1906  * structure. (this way the maximum trace is permanently saved,
1907  * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1908  */
1909 static void
1910 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1911 {
1912 	struct array_buffer *trace_buf = &tr->array_buffer;
1913 	struct array_buffer *max_buf = &tr->max_buffer;
1914 	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1915 	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1916 
1917 	max_buf->cpu = cpu;
1918 	max_buf->time_start = data->preempt_timestamp;
1919 
1920 	max_data->saved_latency = tr->max_latency;
1921 	max_data->critical_start = data->critical_start;
1922 	max_data->critical_end = data->critical_end;
1923 
1924 	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1925 	max_data->pid = tsk->pid;
1926 	/*
1927 	 * If tsk == current, then use current_uid(), as that does not use
1928 	 * RCU. The irq tracer can be called out of RCU scope.
1929 	 */
1930 	if (tsk == current)
1931 		max_data->uid = current_uid();
1932 	else
1933 		max_data->uid = task_uid(tsk);
1934 
1935 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1936 	max_data->policy = tsk->policy;
1937 	max_data->rt_priority = tsk->rt_priority;
1938 
1939 	/* record this tasks comm */
1940 	tracing_record_cmdline(tsk);
1941 	latency_fsnotify(tr);
1942 }
1943 
1944 /**
1945  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1946  * @tr: tracer
1947  * @tsk: the task with the latency
1948  * @cpu: The cpu that initiated the trace.
1949  * @cond_data: User data associated with a conditional snapshot
1950  *
1951  * Flip the buffers between the @tr and the max_tr and record information
1952  * about which task was the cause of this latency.
1953  */
1954 void
1955 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1956 	      void *cond_data)
1957 {
1958 	if (tr->stop_count)
1959 		return;
1960 
1961 	WARN_ON_ONCE(!irqs_disabled());
1962 
1963 	if (!tr->allocated_snapshot) {
1964 		/* Only the nop tracer should hit this when disabling */
1965 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1966 		return;
1967 	}
1968 
1969 	arch_spin_lock(&tr->max_lock);
1970 
1971 	/* Inherit the recordable setting from array_buffer */
1972 	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1973 		ring_buffer_record_on(tr->max_buffer.buffer);
1974 	else
1975 		ring_buffer_record_off(tr->max_buffer.buffer);
1976 
1977 #ifdef CONFIG_TRACER_SNAPSHOT
1978 	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1979 		arch_spin_unlock(&tr->max_lock);
1980 		return;
1981 	}
1982 #endif
1983 	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1984 
1985 	__update_max_tr(tr, tsk, cpu);
1986 
1987 	arch_spin_unlock(&tr->max_lock);
1988 
1989 	/* Any waiters on the old snapshot buffer need to wake up */
1990 	ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
1991 }
1992 
1993 /**
1994  * update_max_tr_single - only copy one trace over, and reset the rest
1995  * @tr: tracer
1996  * @tsk: task with the latency
1997  * @cpu: the cpu of the buffer to copy.
1998  *
1999  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
2000  */
2001 void
2002 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
2003 {
2004 	int ret;
2005 
2006 	if (tr->stop_count)
2007 		return;
2008 
2009 	WARN_ON_ONCE(!irqs_disabled());
2010 	if (!tr->allocated_snapshot) {
2011 		/* Only the nop tracer should hit this when disabling */
2012 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
2013 		return;
2014 	}
2015 
2016 	arch_spin_lock(&tr->max_lock);
2017 
2018 	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
2019 
2020 	if (ret == -EBUSY) {
2021 		/*
2022 		 * We failed to swap the buffer due to a commit taking
2023 		 * place on this CPU. We fail to record, but we reset
2024 		 * the max trace buffer (no one writes directly to it)
2025 		 * and flag that it failed.
2026 		 * Another reason is resize is in progress.
2027 		 */
2028 		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
2029 			"Failed to swap buffers due to commit or resize in progress\n");
2030 	}
2031 
2032 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
2033 
2034 	__update_max_tr(tr, tsk, cpu);
2035 	arch_spin_unlock(&tr->max_lock);
2036 }
2037 
2038 #endif /* CONFIG_TRACER_MAX_TRACE */
2039 
2040 struct pipe_wait {
2041 	struct trace_iterator		*iter;
2042 	int				wait_index;
2043 };
2044 
2045 static bool wait_pipe_cond(void *data)
2046 {
2047 	struct pipe_wait *pwait = data;
2048 	struct trace_iterator *iter = pwait->iter;
2049 
2050 	if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index)
2051 		return true;
2052 
2053 	return iter->closed;
2054 }
2055 
2056 static int wait_on_pipe(struct trace_iterator *iter, int full)
2057 {
2058 	struct pipe_wait pwait;
2059 	int ret;
2060 
2061 	/* Iterators are static, they should be filled or empty */
2062 	if (trace_buffer_iter(iter, iter->cpu_file))
2063 		return 0;
2064 
2065 	pwait.wait_index = atomic_read_acquire(&iter->wait_index);
2066 	pwait.iter = iter;
2067 
2068 	ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full,
2069 			       wait_pipe_cond, &pwait);
2070 
2071 #ifdef CONFIG_TRACER_MAX_TRACE
2072 	/*
2073 	 * Make sure this is still the snapshot buffer, as if a snapshot were
2074 	 * to happen, this would now be the main buffer.
2075 	 */
2076 	if (iter->snapshot)
2077 		iter->array_buffer = &iter->tr->max_buffer;
2078 #endif
2079 	return ret;
2080 }
2081 
2082 #ifdef CONFIG_FTRACE_STARTUP_TEST
2083 static bool selftests_can_run;
2084 
2085 struct trace_selftests {
2086 	struct list_head		list;
2087 	struct tracer			*type;
2088 };
2089 
2090 static LIST_HEAD(postponed_selftests);
2091 
2092 static int save_selftest(struct tracer *type)
2093 {
2094 	struct trace_selftests *selftest;
2095 
2096 	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
2097 	if (!selftest)
2098 		return -ENOMEM;
2099 
2100 	selftest->type = type;
2101 	list_add(&selftest->list, &postponed_selftests);
2102 	return 0;
2103 }
2104 
2105 static int run_tracer_selftest(struct tracer *type)
2106 {
2107 	struct trace_array *tr = &global_trace;
2108 	struct tracer *saved_tracer = tr->current_trace;
2109 	int ret;
2110 
2111 	if (!type->selftest || tracing_selftest_disabled)
2112 		return 0;
2113 
2114 	/*
2115 	 * If a tracer registers early in boot up (before scheduling is
2116 	 * initialized and such), then do not run its selftests yet.
2117 	 * Instead, run it a little later in the boot process.
2118 	 */
2119 	if (!selftests_can_run)
2120 		return save_selftest(type);
2121 
2122 	if (!tracing_is_on()) {
2123 		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
2124 			type->name);
2125 		return 0;
2126 	}
2127 
2128 	/*
2129 	 * Run a selftest on this tracer.
2130 	 * Here we reset the trace buffer, and set the current
2131 	 * tracer to be this tracer. The tracer can then run some
2132 	 * internal tracing to verify that everything is in order.
2133 	 * If we fail, we do not register this tracer.
2134 	 */
2135 	tracing_reset_online_cpus(&tr->array_buffer);
2136 
2137 	tr->current_trace = type;
2138 
2139 #ifdef CONFIG_TRACER_MAX_TRACE
2140 	if (type->use_max_tr) {
2141 		/* If we expanded the buffers, make sure the max is expanded too */
2142 		if (tr->ring_buffer_expanded)
2143 			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2144 					   RING_BUFFER_ALL_CPUS);
2145 		tr->allocated_snapshot = true;
2146 	}
2147 #endif
2148 
2149 	/* the test is responsible for initializing and enabling */
2150 	pr_info("Testing tracer %s: ", type->name);
2151 	ret = type->selftest(type, tr);
2152 	/* the test is responsible for resetting too */
2153 	tr->current_trace = saved_tracer;
2154 	if (ret) {
2155 		printk(KERN_CONT "FAILED!\n");
2156 		/* Add the warning after printing 'FAILED' */
2157 		WARN_ON(1);
2158 		return -1;
2159 	}
2160 	/* Only reset on passing, to avoid touching corrupted buffers */
2161 	tracing_reset_online_cpus(&tr->array_buffer);
2162 
2163 #ifdef CONFIG_TRACER_MAX_TRACE
2164 	if (type->use_max_tr) {
2165 		tr->allocated_snapshot = false;
2166 
2167 		/* Shrink the max buffer again */
2168 		if (tr->ring_buffer_expanded)
2169 			ring_buffer_resize(tr->max_buffer.buffer, 1,
2170 					   RING_BUFFER_ALL_CPUS);
2171 	}
2172 #endif
2173 
2174 	printk(KERN_CONT "PASSED\n");
2175 	return 0;
2176 }
2177 
2178 static int do_run_tracer_selftest(struct tracer *type)
2179 {
2180 	int ret;
2181 
2182 	/*
2183 	 * Tests can take a long time, especially if they are run one after the
2184 	 * other, as does happen during bootup when all the tracers are
2185 	 * registered. This could cause the soft lockup watchdog to trigger.
2186 	 */
2187 	cond_resched();
2188 
2189 	tracing_selftest_running = true;
2190 	ret = run_tracer_selftest(type);
2191 	tracing_selftest_running = false;
2192 
2193 	return ret;
2194 }
2195 
2196 static __init int init_trace_selftests(void)
2197 {
2198 	struct trace_selftests *p, *n;
2199 	struct tracer *t, **last;
2200 	int ret;
2201 
2202 	selftests_can_run = true;
2203 
2204 	mutex_lock(&trace_types_lock);
2205 
2206 	if (list_empty(&postponed_selftests))
2207 		goto out;
2208 
2209 	pr_info("Running postponed tracer tests:\n");
2210 
2211 	tracing_selftest_running = true;
2212 	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2213 		/* This loop can take minutes when sanitizers are enabled, so
2214 		 * lets make sure we allow RCU processing.
2215 		 */
2216 		cond_resched();
2217 		ret = run_tracer_selftest(p->type);
2218 		/* If the test fails, then warn and remove from available_tracers */
2219 		if (ret < 0) {
2220 			WARN(1, "tracer: %s failed selftest, disabling\n",
2221 			     p->type->name);
2222 			last = &trace_types;
2223 			for (t = trace_types; t; t = t->next) {
2224 				if (t == p->type) {
2225 					*last = t->next;
2226 					break;
2227 				}
2228 				last = &t->next;
2229 			}
2230 		}
2231 		list_del(&p->list);
2232 		kfree(p);
2233 	}
2234 	tracing_selftest_running = false;
2235 
2236  out:
2237 	mutex_unlock(&trace_types_lock);
2238 
2239 	return 0;
2240 }
2241 core_initcall(init_trace_selftests);
2242 #else
2243 static inline int do_run_tracer_selftest(struct tracer *type)
2244 {
2245 	return 0;
2246 }
2247 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2248 
2249 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2250 
2251 static void __init apply_trace_boot_options(void);
2252 
2253 /**
2254  * register_tracer - register a tracer with the ftrace system.
2255  * @type: the plugin for the tracer
2256  *
2257  * Register a new plugin tracer.
2258  */
2259 int __init register_tracer(struct tracer *type)
2260 {
2261 	struct tracer *t;
2262 	int ret = 0;
2263 
2264 	if (!type->name) {
2265 		pr_info("Tracer must have a name\n");
2266 		return -1;
2267 	}
2268 
2269 	if (strlen(type->name) >= MAX_TRACER_SIZE) {
2270 		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2271 		return -1;
2272 	}
2273 
2274 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
2275 		pr_warn("Can not register tracer %s due to lockdown\n",
2276 			   type->name);
2277 		return -EPERM;
2278 	}
2279 
2280 	mutex_lock(&trace_types_lock);
2281 
2282 	for (t = trace_types; t; t = t->next) {
2283 		if (strcmp(type->name, t->name) == 0) {
2284 			/* already found */
2285 			pr_info("Tracer %s already registered\n",
2286 				type->name);
2287 			ret = -1;
2288 			goto out;
2289 		}
2290 	}
2291 
2292 	if (!type->set_flag)
2293 		type->set_flag = &dummy_set_flag;
2294 	if (!type->flags) {
2295 		/*allocate a dummy tracer_flags*/
2296 		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2297 		if (!type->flags) {
2298 			ret = -ENOMEM;
2299 			goto out;
2300 		}
2301 		type->flags->val = 0;
2302 		type->flags->opts = dummy_tracer_opt;
2303 	} else
2304 		if (!type->flags->opts)
2305 			type->flags->opts = dummy_tracer_opt;
2306 
2307 	/* store the tracer for __set_tracer_option */
2308 	type->flags->trace = type;
2309 
2310 	ret = do_run_tracer_selftest(type);
2311 	if (ret < 0)
2312 		goto out;
2313 
2314 	type->next = trace_types;
2315 	trace_types = type;
2316 	add_tracer_options(&global_trace, type);
2317 
2318  out:
2319 	mutex_unlock(&trace_types_lock);
2320 
2321 	if (ret || !default_bootup_tracer)
2322 		goto out_unlock;
2323 
2324 	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2325 		goto out_unlock;
2326 
2327 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2328 	/* Do we want this tracer to start on bootup? */
2329 	tracing_set_tracer(&global_trace, type->name);
2330 	default_bootup_tracer = NULL;
2331 
2332 	apply_trace_boot_options();
2333 
2334 	/* disable other selftests, since this will break it. */
2335 	disable_tracing_selftest("running a tracer");
2336 
2337  out_unlock:
2338 	return ret;
2339 }
2340 
2341 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2342 {
2343 	struct trace_buffer *buffer = buf->buffer;
2344 
2345 	if (!buffer)
2346 		return;
2347 
2348 	ring_buffer_record_disable(buffer);
2349 
2350 	/* Make sure all commits have finished */
2351 	synchronize_rcu();
2352 	ring_buffer_reset_cpu(buffer, cpu);
2353 
2354 	ring_buffer_record_enable(buffer);
2355 }
2356 
2357 void tracing_reset_online_cpus(struct array_buffer *buf)
2358 {
2359 	struct trace_buffer *buffer = buf->buffer;
2360 
2361 	if (!buffer)
2362 		return;
2363 
2364 	ring_buffer_record_disable(buffer);
2365 
2366 	/* Make sure all commits have finished */
2367 	synchronize_rcu();
2368 
2369 	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2370 
2371 	ring_buffer_reset_online_cpus(buffer);
2372 
2373 	ring_buffer_record_enable(buffer);
2374 }
2375 
2376 /* Must have trace_types_lock held */
2377 void tracing_reset_all_online_cpus_unlocked(void)
2378 {
2379 	struct trace_array *tr;
2380 
2381 	lockdep_assert_held(&trace_types_lock);
2382 
2383 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2384 		if (!tr->clear_trace)
2385 			continue;
2386 		tr->clear_trace = false;
2387 		tracing_reset_online_cpus(&tr->array_buffer);
2388 #ifdef CONFIG_TRACER_MAX_TRACE
2389 		tracing_reset_online_cpus(&tr->max_buffer);
2390 #endif
2391 	}
2392 }
2393 
2394 void tracing_reset_all_online_cpus(void)
2395 {
2396 	mutex_lock(&trace_types_lock);
2397 	tracing_reset_all_online_cpus_unlocked();
2398 	mutex_unlock(&trace_types_lock);
2399 }
2400 
2401 int is_tracing_stopped(void)
2402 {
2403 	return global_trace.stop_count;
2404 }
2405 
2406 static void tracing_start_tr(struct trace_array *tr)
2407 {
2408 	struct trace_buffer *buffer;
2409 	unsigned long flags;
2410 
2411 	if (tracing_disabled)
2412 		return;
2413 
2414 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2415 	if (--tr->stop_count) {
2416 		if (WARN_ON_ONCE(tr->stop_count < 0)) {
2417 			/* Someone screwed up their debugging */
2418 			tr->stop_count = 0;
2419 		}
2420 		goto out;
2421 	}
2422 
2423 	/* Prevent the buffers from switching */
2424 	arch_spin_lock(&tr->max_lock);
2425 
2426 	buffer = tr->array_buffer.buffer;
2427 	if (buffer)
2428 		ring_buffer_record_enable(buffer);
2429 
2430 #ifdef CONFIG_TRACER_MAX_TRACE
2431 	buffer = tr->max_buffer.buffer;
2432 	if (buffer)
2433 		ring_buffer_record_enable(buffer);
2434 #endif
2435 
2436 	arch_spin_unlock(&tr->max_lock);
2437 
2438  out:
2439 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2440 }
2441 
2442 /**
2443  * tracing_start - quick start of the tracer
2444  *
2445  * If tracing is enabled but was stopped by tracing_stop,
2446  * this will start the tracer back up.
2447  */
2448 void tracing_start(void)
2449 
2450 {
2451 	return tracing_start_tr(&global_trace);
2452 }
2453 
2454 static void tracing_stop_tr(struct trace_array *tr)
2455 {
2456 	struct trace_buffer *buffer;
2457 	unsigned long flags;
2458 
2459 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2460 	if (tr->stop_count++)
2461 		goto out;
2462 
2463 	/* Prevent the buffers from switching */
2464 	arch_spin_lock(&tr->max_lock);
2465 
2466 	buffer = tr->array_buffer.buffer;
2467 	if (buffer)
2468 		ring_buffer_record_disable(buffer);
2469 
2470 #ifdef CONFIG_TRACER_MAX_TRACE
2471 	buffer = tr->max_buffer.buffer;
2472 	if (buffer)
2473 		ring_buffer_record_disable(buffer);
2474 #endif
2475 
2476 	arch_spin_unlock(&tr->max_lock);
2477 
2478  out:
2479 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2480 }
2481 
2482 /**
2483  * tracing_stop - quick stop of the tracer
2484  *
2485  * Light weight way to stop tracing. Use in conjunction with
2486  * tracing_start.
2487  */
2488 void tracing_stop(void)
2489 {
2490 	return tracing_stop_tr(&global_trace);
2491 }
2492 
2493 /*
2494  * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2495  * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2496  * simplifies those functions and keeps them in sync.
2497  */
2498 enum print_line_t trace_handle_return(struct trace_seq *s)
2499 {
2500 	return trace_seq_has_overflowed(s) ?
2501 		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2502 }
2503 EXPORT_SYMBOL_GPL(trace_handle_return);
2504 
2505 static unsigned short migration_disable_value(void)
2506 {
2507 #if defined(CONFIG_SMP)
2508 	return current->migration_disabled;
2509 #else
2510 	return 0;
2511 #endif
2512 }
2513 
2514 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2515 {
2516 	unsigned int trace_flags = irqs_status;
2517 	unsigned int pc;
2518 
2519 	pc = preempt_count();
2520 
2521 	if (pc & NMI_MASK)
2522 		trace_flags |= TRACE_FLAG_NMI;
2523 	if (pc & HARDIRQ_MASK)
2524 		trace_flags |= TRACE_FLAG_HARDIRQ;
2525 	if (in_serving_softirq())
2526 		trace_flags |= TRACE_FLAG_SOFTIRQ;
2527 	if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2528 		trace_flags |= TRACE_FLAG_BH_OFF;
2529 
2530 	if (tif_need_resched())
2531 		trace_flags |= TRACE_FLAG_NEED_RESCHED;
2532 	if (test_preempt_need_resched())
2533 		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2534 	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2535 		(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
2536 }
2537 
2538 struct ring_buffer_event *
2539 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2540 			  int type,
2541 			  unsigned long len,
2542 			  unsigned int trace_ctx)
2543 {
2544 	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2545 }
2546 
2547 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2548 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2549 static int trace_buffered_event_ref;
2550 
2551 /**
2552  * trace_buffered_event_enable - enable buffering events
2553  *
2554  * When events are being filtered, it is quicker to use a temporary
2555  * buffer to write the event data into if there's a likely chance
2556  * that it will not be committed. The discard of the ring buffer
2557  * is not as fast as committing, and is much slower than copying
2558  * a commit.
2559  *
2560  * When an event is to be filtered, allocate per cpu buffers to
2561  * write the event data into, and if the event is filtered and discarded
2562  * it is simply dropped, otherwise, the entire data is to be committed
2563  * in one shot.
2564  */
2565 void trace_buffered_event_enable(void)
2566 {
2567 	struct ring_buffer_event *event;
2568 	struct page *page;
2569 	int cpu;
2570 
2571 	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2572 
2573 	if (trace_buffered_event_ref++)
2574 		return;
2575 
2576 	for_each_tracing_cpu(cpu) {
2577 		page = alloc_pages_node(cpu_to_node(cpu),
2578 					GFP_KERNEL | __GFP_NORETRY, 0);
2579 		/* This is just an optimization and can handle failures */
2580 		if (!page) {
2581 			pr_err("Failed to allocate event buffer\n");
2582 			break;
2583 		}
2584 
2585 		event = page_address(page);
2586 		memset(event, 0, sizeof(*event));
2587 
2588 		per_cpu(trace_buffered_event, cpu) = event;
2589 
2590 		preempt_disable();
2591 		if (cpu == smp_processor_id() &&
2592 		    __this_cpu_read(trace_buffered_event) !=
2593 		    per_cpu(trace_buffered_event, cpu))
2594 			WARN_ON_ONCE(1);
2595 		preempt_enable();
2596 	}
2597 }
2598 
2599 static void enable_trace_buffered_event(void *data)
2600 {
2601 	/* Probably not needed, but do it anyway */
2602 	smp_rmb();
2603 	this_cpu_dec(trace_buffered_event_cnt);
2604 }
2605 
2606 static void disable_trace_buffered_event(void *data)
2607 {
2608 	this_cpu_inc(trace_buffered_event_cnt);
2609 }
2610 
2611 /**
2612  * trace_buffered_event_disable - disable buffering events
2613  *
2614  * When a filter is removed, it is faster to not use the buffered
2615  * events, and to commit directly into the ring buffer. Free up
2616  * the temp buffers when there are no more users. This requires
2617  * special synchronization with current events.
2618  */
2619 void trace_buffered_event_disable(void)
2620 {
2621 	int cpu;
2622 
2623 	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2624 
2625 	if (WARN_ON_ONCE(!trace_buffered_event_ref))
2626 		return;
2627 
2628 	if (--trace_buffered_event_ref)
2629 		return;
2630 
2631 	/* For each CPU, set the buffer as used. */
2632 	on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
2633 			 NULL, true);
2634 
2635 	/* Wait for all current users to finish */
2636 	synchronize_rcu();
2637 
2638 	for_each_tracing_cpu(cpu) {
2639 		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2640 		per_cpu(trace_buffered_event, cpu) = NULL;
2641 	}
2642 
2643 	/*
2644 	 * Wait for all CPUs that potentially started checking if they can use
2645 	 * their event buffer only after the previous synchronize_rcu() call and
2646 	 * they still read a valid pointer from trace_buffered_event. It must be
2647 	 * ensured they don't see cleared trace_buffered_event_cnt else they
2648 	 * could wrongly decide to use the pointed-to buffer which is now freed.
2649 	 */
2650 	synchronize_rcu();
2651 
2652 	/* For each CPU, relinquish the buffer */
2653 	on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
2654 			 true);
2655 }
2656 
2657 static struct trace_buffer *temp_buffer;
2658 
2659 struct ring_buffer_event *
2660 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2661 			  struct trace_event_file *trace_file,
2662 			  int type, unsigned long len,
2663 			  unsigned int trace_ctx)
2664 {
2665 	struct ring_buffer_event *entry;
2666 	struct trace_array *tr = trace_file->tr;
2667 	int val;
2668 
2669 	*current_rb = tr->array_buffer.buffer;
2670 
2671 	if (!tr->no_filter_buffering_ref &&
2672 	    (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2673 		preempt_disable_notrace();
2674 		/*
2675 		 * Filtering is on, so try to use the per cpu buffer first.
2676 		 * This buffer will simulate a ring_buffer_event,
2677 		 * where the type_len is zero and the array[0] will
2678 		 * hold the full length.
2679 		 * (see include/linux/ring-buffer.h for details on
2680 		 *  how the ring_buffer_event is structured).
2681 		 *
2682 		 * Using a temp buffer during filtering and copying it
2683 		 * on a matched filter is quicker than writing directly
2684 		 * into the ring buffer and then discarding it when
2685 		 * it doesn't match. That is because the discard
2686 		 * requires several atomic operations to get right.
2687 		 * Copying on match and doing nothing on a failed match
2688 		 * is still quicker than no copy on match, but having
2689 		 * to discard out of the ring buffer on a failed match.
2690 		 */
2691 		if ((entry = __this_cpu_read(trace_buffered_event))) {
2692 			int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2693 
2694 			val = this_cpu_inc_return(trace_buffered_event_cnt);
2695 
2696 			/*
2697 			 * Preemption is disabled, but interrupts and NMIs
2698 			 * can still come in now. If that happens after
2699 			 * the above increment, then it will have to go
2700 			 * back to the old method of allocating the event
2701 			 * on the ring buffer, and if the filter fails, it
2702 			 * will have to call ring_buffer_discard_commit()
2703 			 * to remove it.
2704 			 *
2705 			 * Need to also check the unlikely case that the
2706 			 * length is bigger than the temp buffer size.
2707 			 * If that happens, then the reserve is pretty much
2708 			 * guaranteed to fail, as the ring buffer currently
2709 			 * only allows events less than a page. But that may
2710 			 * change in the future, so let the ring buffer reserve
2711 			 * handle the failure in that case.
2712 			 */
2713 			if (val == 1 && likely(len <= max_len)) {
2714 				trace_event_setup(entry, type, trace_ctx);
2715 				entry->array[0] = len;
2716 				/* Return with preemption disabled */
2717 				return entry;
2718 			}
2719 			this_cpu_dec(trace_buffered_event_cnt);
2720 		}
2721 		/* __trace_buffer_lock_reserve() disables preemption */
2722 		preempt_enable_notrace();
2723 	}
2724 
2725 	entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2726 					    trace_ctx);
2727 	/*
2728 	 * If tracing is off, but we have triggers enabled
2729 	 * we still need to look at the event data. Use the temp_buffer
2730 	 * to store the trace event for the trigger to use. It's recursive
2731 	 * safe and will not be recorded anywhere.
2732 	 */
2733 	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2734 		*current_rb = temp_buffer;
2735 		entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2736 						    trace_ctx);
2737 	}
2738 	return entry;
2739 }
2740 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2741 
2742 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2743 static DEFINE_MUTEX(tracepoint_printk_mutex);
2744 
2745 static void output_printk(struct trace_event_buffer *fbuffer)
2746 {
2747 	struct trace_event_call *event_call;
2748 	struct trace_event_file *file;
2749 	struct trace_event *event;
2750 	unsigned long flags;
2751 	struct trace_iterator *iter = tracepoint_print_iter;
2752 
2753 	/* We should never get here if iter is NULL */
2754 	if (WARN_ON_ONCE(!iter))
2755 		return;
2756 
2757 	event_call = fbuffer->trace_file->event_call;
2758 	if (!event_call || !event_call->event.funcs ||
2759 	    !event_call->event.funcs->trace)
2760 		return;
2761 
2762 	file = fbuffer->trace_file;
2763 	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2764 	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2765 	     !filter_match_preds(file->filter, fbuffer->entry)))
2766 		return;
2767 
2768 	event = &fbuffer->trace_file->event_call->event;
2769 
2770 	raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2771 	trace_seq_init(&iter->seq);
2772 	iter->ent = fbuffer->entry;
2773 	event_call->event.funcs->trace(iter, 0, event);
2774 	trace_seq_putc(&iter->seq, 0);
2775 	printk("%s", iter->seq.buffer);
2776 
2777 	raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2778 }
2779 
2780 int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
2781 			     void *buffer, size_t *lenp,
2782 			     loff_t *ppos)
2783 {
2784 	int save_tracepoint_printk;
2785 	int ret;
2786 
2787 	mutex_lock(&tracepoint_printk_mutex);
2788 	save_tracepoint_printk = tracepoint_printk;
2789 
2790 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2791 
2792 	/*
2793 	 * This will force exiting early, as tracepoint_printk
2794 	 * is always zero when tracepoint_printk_iter is not allocated
2795 	 */
2796 	if (!tracepoint_print_iter)
2797 		tracepoint_printk = 0;
2798 
2799 	if (save_tracepoint_printk == tracepoint_printk)
2800 		goto out;
2801 
2802 	if (tracepoint_printk)
2803 		static_key_enable(&tracepoint_printk_key.key);
2804 	else
2805 		static_key_disable(&tracepoint_printk_key.key);
2806 
2807  out:
2808 	mutex_unlock(&tracepoint_printk_mutex);
2809 
2810 	return ret;
2811 }
2812 
2813 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2814 {
2815 	enum event_trigger_type tt = ETT_NONE;
2816 	struct trace_event_file *file = fbuffer->trace_file;
2817 
2818 	if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2819 			fbuffer->entry, &tt))
2820 		goto discard;
2821 
2822 	if (static_key_false(&tracepoint_printk_key.key))
2823 		output_printk(fbuffer);
2824 
2825 	if (static_branch_unlikely(&trace_event_exports_enabled))
2826 		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2827 
2828 	trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2829 			fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2830 
2831 discard:
2832 	if (tt)
2833 		event_triggers_post_call(file, tt);
2834 
2835 }
2836 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2837 
2838 /*
2839  * Skip 3:
2840  *
2841  *   trace_buffer_unlock_commit_regs()
2842  *   trace_event_buffer_commit()
2843  *   trace_event_raw_event_xxx()
2844  */
2845 # define STACK_SKIP 3
2846 
2847 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2848 				     struct trace_buffer *buffer,
2849 				     struct ring_buffer_event *event,
2850 				     unsigned int trace_ctx,
2851 				     struct pt_regs *regs)
2852 {
2853 	__buffer_unlock_commit(buffer, event);
2854 
2855 	/*
2856 	 * If regs is not set, then skip the necessary functions.
2857 	 * Note, we can still get here via blktrace, wakeup tracer
2858 	 * and mmiotrace, but that's ok if they lose a function or
2859 	 * two. They are not that meaningful.
2860 	 */
2861 	ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2862 	ftrace_trace_userstack(tr, buffer, trace_ctx);
2863 }
2864 
2865 /*
2866  * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2867  */
2868 void
2869 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2870 				   struct ring_buffer_event *event)
2871 {
2872 	__buffer_unlock_commit(buffer, event);
2873 }
2874 
2875 void
2876 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2877 	       parent_ip, unsigned int trace_ctx)
2878 {
2879 	struct trace_buffer *buffer = tr->array_buffer.buffer;
2880 	struct ring_buffer_event *event;
2881 	struct ftrace_entry *entry;
2882 
2883 	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2884 					    trace_ctx);
2885 	if (!event)
2886 		return;
2887 	entry	= ring_buffer_event_data(event);
2888 	entry->ip			= ip;
2889 	entry->parent_ip		= parent_ip;
2890 
2891 	if (static_branch_unlikely(&trace_function_exports_enabled))
2892 		ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2893 	__buffer_unlock_commit(buffer, event);
2894 }
2895 
2896 #ifdef CONFIG_STACKTRACE
2897 
2898 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2899 #define FTRACE_KSTACK_NESTING	4
2900 
2901 #define FTRACE_KSTACK_ENTRIES	(PAGE_SIZE / FTRACE_KSTACK_NESTING)
2902 
2903 struct ftrace_stack {
2904 	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
2905 };
2906 
2907 
2908 struct ftrace_stacks {
2909 	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
2910 };
2911 
2912 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2913 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2914 
2915 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2916 				 unsigned int trace_ctx,
2917 				 int skip, struct pt_regs *regs)
2918 {
2919 	struct ring_buffer_event *event;
2920 	unsigned int size, nr_entries;
2921 	struct ftrace_stack *fstack;
2922 	struct stack_entry *entry;
2923 	int stackidx;
2924 
2925 	/*
2926 	 * Add one, for this function and the call to save_stack_trace()
2927 	 * If regs is set, then these functions will not be in the way.
2928 	 */
2929 #ifndef CONFIG_UNWINDER_ORC
2930 	if (!regs)
2931 		skip++;
2932 #endif
2933 
2934 	preempt_disable_notrace();
2935 
2936 	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2937 
2938 	/* This should never happen. If it does, yell once and skip */
2939 	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2940 		goto out;
2941 
2942 	/*
2943 	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2944 	 * interrupt will either see the value pre increment or post
2945 	 * increment. If the interrupt happens pre increment it will have
2946 	 * restored the counter when it returns.  We just need a barrier to
2947 	 * keep gcc from moving things around.
2948 	 */
2949 	barrier();
2950 
2951 	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2952 	size = ARRAY_SIZE(fstack->calls);
2953 
2954 	if (regs) {
2955 		nr_entries = stack_trace_save_regs(regs, fstack->calls,
2956 						   size, skip);
2957 	} else {
2958 		nr_entries = stack_trace_save(fstack->calls, size, skip);
2959 	}
2960 
2961 	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2962 				    struct_size(entry, caller, nr_entries),
2963 				    trace_ctx);
2964 	if (!event)
2965 		goto out;
2966 	entry = ring_buffer_event_data(event);
2967 
2968 	entry->size = nr_entries;
2969 	memcpy(&entry->caller, fstack->calls,
2970 	       flex_array_size(entry, caller, nr_entries));
2971 
2972 	__buffer_unlock_commit(buffer, event);
2973 
2974  out:
2975 	/* Again, don't let gcc optimize things here */
2976 	barrier();
2977 	__this_cpu_dec(ftrace_stack_reserve);
2978 	preempt_enable_notrace();
2979 
2980 }
2981 
2982 static inline void ftrace_trace_stack(struct trace_array *tr,
2983 				      struct trace_buffer *buffer,
2984 				      unsigned int trace_ctx,
2985 				      int skip, struct pt_regs *regs)
2986 {
2987 	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2988 		return;
2989 
2990 	__ftrace_trace_stack(buffer, trace_ctx, skip, regs);
2991 }
2992 
2993 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
2994 		   int skip)
2995 {
2996 	struct trace_buffer *buffer = tr->array_buffer.buffer;
2997 
2998 	if (rcu_is_watching()) {
2999 		__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3000 		return;
3001 	}
3002 
3003 	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
3004 		return;
3005 
3006 	/*
3007 	 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3008 	 * but if the above rcu_is_watching() failed, then the NMI
3009 	 * triggered someplace critical, and ct_irq_enter() should
3010 	 * not be called from NMI.
3011 	 */
3012 	if (unlikely(in_nmi()))
3013 		return;
3014 
3015 	ct_irq_enter_irqson();
3016 	__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3017 	ct_irq_exit_irqson();
3018 }
3019 
3020 /**
3021  * trace_dump_stack - record a stack back trace in the trace buffer
3022  * @skip: Number of functions to skip (helper handlers)
3023  */
3024 void trace_dump_stack(int skip)
3025 {
3026 	if (tracing_disabled || tracing_selftest_running)
3027 		return;
3028 
3029 #ifndef CONFIG_UNWINDER_ORC
3030 	/* Skip 1 to skip this function. */
3031 	skip++;
3032 #endif
3033 	__ftrace_trace_stack(printk_trace->array_buffer.buffer,
3034 			     tracing_gen_ctx(), skip, NULL);
3035 }
3036 EXPORT_SYMBOL_GPL(trace_dump_stack);
3037 
3038 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3039 static DEFINE_PER_CPU(int, user_stack_count);
3040 
3041 static void
3042 ftrace_trace_userstack(struct trace_array *tr,
3043 		       struct trace_buffer *buffer, unsigned int trace_ctx)
3044 {
3045 	struct ring_buffer_event *event;
3046 	struct userstack_entry *entry;
3047 
3048 	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3049 		return;
3050 
3051 	/*
3052 	 * NMIs can not handle page faults, even with fix ups.
3053 	 * The save user stack can (and often does) fault.
3054 	 */
3055 	if (unlikely(in_nmi()))
3056 		return;
3057 
3058 	/*
3059 	 * prevent recursion, since the user stack tracing may
3060 	 * trigger other kernel events.
3061 	 */
3062 	preempt_disable();
3063 	if (__this_cpu_read(user_stack_count))
3064 		goto out;
3065 
3066 	__this_cpu_inc(user_stack_count);
3067 
3068 	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3069 					    sizeof(*entry), trace_ctx);
3070 	if (!event)
3071 		goto out_drop_count;
3072 	entry	= ring_buffer_event_data(event);
3073 
3074 	entry->tgid		= current->tgid;
3075 	memset(&entry->caller, 0, sizeof(entry->caller));
3076 
3077 	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3078 	__buffer_unlock_commit(buffer, event);
3079 
3080  out_drop_count:
3081 	__this_cpu_dec(user_stack_count);
3082  out:
3083 	preempt_enable();
3084 }
3085 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3086 static void ftrace_trace_userstack(struct trace_array *tr,
3087 				   struct trace_buffer *buffer,
3088 				   unsigned int trace_ctx)
3089 {
3090 }
3091 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3092 
3093 #endif /* CONFIG_STACKTRACE */
3094 
3095 static inline void
3096 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3097 			  unsigned long long delta)
3098 {
3099 	entry->bottom_delta_ts = delta & U32_MAX;
3100 	entry->top_delta_ts = (delta >> 32);
3101 }
3102 
3103 void trace_last_func_repeats(struct trace_array *tr,
3104 			     struct trace_func_repeats *last_info,
3105 			     unsigned int trace_ctx)
3106 {
3107 	struct trace_buffer *buffer = tr->array_buffer.buffer;
3108 	struct func_repeats_entry *entry;
3109 	struct ring_buffer_event *event;
3110 	u64 delta;
3111 
3112 	event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3113 					    sizeof(*entry), trace_ctx);
3114 	if (!event)
3115 		return;
3116 
3117 	delta = ring_buffer_event_time_stamp(buffer, event) -
3118 		last_info->ts_last_call;
3119 
3120 	entry = ring_buffer_event_data(event);
3121 	entry->ip = last_info->ip;
3122 	entry->parent_ip = last_info->parent_ip;
3123 	entry->count = last_info->count;
3124 	func_repeats_set_delta_ts(entry, delta);
3125 
3126 	__buffer_unlock_commit(buffer, event);
3127 }
3128 
3129 /* created for use with alloc_percpu */
3130 struct trace_buffer_struct {
3131 	int nesting;
3132 	char buffer[4][TRACE_BUF_SIZE];
3133 };
3134 
3135 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3136 
3137 /*
3138  * This allows for lockless recording.  If we're nested too deeply, then
3139  * this returns NULL.
3140  */
3141 static char *get_trace_buf(void)
3142 {
3143 	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3144 
3145 	if (!trace_percpu_buffer || buffer->nesting >= 4)
3146 		return NULL;
3147 
3148 	buffer->nesting++;
3149 
3150 	/* Interrupts must see nesting incremented before we use the buffer */
3151 	barrier();
3152 	return &buffer->buffer[buffer->nesting - 1][0];
3153 }
3154 
3155 static void put_trace_buf(void)
3156 {
3157 	/* Don't let the decrement of nesting leak before this */
3158 	barrier();
3159 	this_cpu_dec(trace_percpu_buffer->nesting);
3160 }
3161 
3162 static int alloc_percpu_trace_buffer(void)
3163 {
3164 	struct trace_buffer_struct __percpu *buffers;
3165 
3166 	if (trace_percpu_buffer)
3167 		return 0;
3168 
3169 	buffers = alloc_percpu(struct trace_buffer_struct);
3170 	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3171 		return -ENOMEM;
3172 
3173 	trace_percpu_buffer = buffers;
3174 	return 0;
3175 }
3176 
3177 static int buffers_allocated;
3178 
3179 void trace_printk_init_buffers(void)
3180 {
3181 	if (buffers_allocated)
3182 		return;
3183 
3184 	if (alloc_percpu_trace_buffer())
3185 		return;
3186 
3187 	/* trace_printk() is for debug use only. Don't use it in production. */
3188 
3189 	pr_warn("\n");
3190 	pr_warn("**********************************************************\n");
3191 	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3192 	pr_warn("**                                                      **\n");
3193 	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
3194 	pr_warn("**                                                      **\n");
3195 	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
3196 	pr_warn("** unsafe for production use.                           **\n");
3197 	pr_warn("**                                                      **\n");
3198 	pr_warn("** If you see this message and you are not debugging    **\n");
3199 	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
3200 	pr_warn("**                                                      **\n");
3201 	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3202 	pr_warn("**********************************************************\n");
3203 
3204 	/* Expand the buffers to set size */
3205 	tracing_update_buffers(&global_trace);
3206 
3207 	buffers_allocated = 1;
3208 
3209 	/*
3210 	 * trace_printk_init_buffers() can be called by modules.
3211 	 * If that happens, then we need to start cmdline recording
3212 	 * directly here. If the global_trace.buffer is already
3213 	 * allocated here, then this was called by module code.
3214 	 */
3215 	if (global_trace.array_buffer.buffer)
3216 		tracing_start_cmdline_record();
3217 }
3218 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3219 
3220 void trace_printk_start_comm(void)
3221 {
3222 	/* Start tracing comms if trace printk is set */
3223 	if (!buffers_allocated)
3224 		return;
3225 	tracing_start_cmdline_record();
3226 }
3227 
3228 static void trace_printk_start_stop_comm(int enabled)
3229 {
3230 	if (!buffers_allocated)
3231 		return;
3232 
3233 	if (enabled)
3234 		tracing_start_cmdline_record();
3235 	else
3236 		tracing_stop_cmdline_record();
3237 }
3238 
3239 /**
3240  * trace_vbprintk - write binary msg to tracing buffer
3241  * @ip:    The address of the caller
3242  * @fmt:   The string format to write to the buffer
3243  * @args:  Arguments for @fmt
3244  */
3245 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3246 {
3247 	struct ring_buffer_event *event;
3248 	struct trace_buffer *buffer;
3249 	struct trace_array *tr = READ_ONCE(printk_trace);
3250 	struct bprint_entry *entry;
3251 	unsigned int trace_ctx;
3252 	char *tbuffer;
3253 	int len = 0, size;
3254 
3255 	if (!printk_binsafe(tr))
3256 		return trace_vprintk(ip, fmt, args);
3257 
3258 	if (unlikely(tracing_selftest_running || tracing_disabled))
3259 		return 0;
3260 
3261 	/* Don't pollute graph traces with trace_vprintk internals */
3262 	pause_graph_tracing();
3263 
3264 	trace_ctx = tracing_gen_ctx();
3265 	preempt_disable_notrace();
3266 
3267 	tbuffer = get_trace_buf();
3268 	if (!tbuffer) {
3269 		len = 0;
3270 		goto out_nobuffer;
3271 	}
3272 
3273 	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3274 
3275 	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3276 		goto out_put;
3277 
3278 	size = sizeof(*entry) + sizeof(u32) * len;
3279 	buffer = tr->array_buffer.buffer;
3280 	ring_buffer_nest_start(buffer);
3281 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3282 					    trace_ctx);
3283 	if (!event)
3284 		goto out;
3285 	entry = ring_buffer_event_data(event);
3286 	entry->ip			= ip;
3287 	entry->fmt			= fmt;
3288 
3289 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3290 	__buffer_unlock_commit(buffer, event);
3291 	ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3292 
3293 out:
3294 	ring_buffer_nest_end(buffer);
3295 out_put:
3296 	put_trace_buf();
3297 
3298 out_nobuffer:
3299 	preempt_enable_notrace();
3300 	unpause_graph_tracing();
3301 
3302 	return len;
3303 }
3304 EXPORT_SYMBOL_GPL(trace_vbprintk);
3305 
3306 __printf(3, 0)
3307 static int
3308 __trace_array_vprintk(struct trace_buffer *buffer,
3309 		      unsigned long ip, const char *fmt, va_list args)
3310 {
3311 	struct ring_buffer_event *event;
3312 	int len = 0, size;
3313 	struct print_entry *entry;
3314 	unsigned int trace_ctx;
3315 	char *tbuffer;
3316 
3317 	if (tracing_disabled)
3318 		return 0;
3319 
3320 	/* Don't pollute graph traces with trace_vprintk internals */
3321 	pause_graph_tracing();
3322 
3323 	trace_ctx = tracing_gen_ctx();
3324 	preempt_disable_notrace();
3325 
3326 
3327 	tbuffer = get_trace_buf();
3328 	if (!tbuffer) {
3329 		len = 0;
3330 		goto out_nobuffer;
3331 	}
3332 
3333 	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3334 
3335 	size = sizeof(*entry) + len + 1;
3336 	ring_buffer_nest_start(buffer);
3337 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3338 					    trace_ctx);
3339 	if (!event)
3340 		goto out;
3341 	entry = ring_buffer_event_data(event);
3342 	entry->ip = ip;
3343 
3344 	memcpy(&entry->buf, tbuffer, len + 1);
3345 	__buffer_unlock_commit(buffer, event);
3346 	ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
3347 
3348 out:
3349 	ring_buffer_nest_end(buffer);
3350 	put_trace_buf();
3351 
3352 out_nobuffer:
3353 	preempt_enable_notrace();
3354 	unpause_graph_tracing();
3355 
3356 	return len;
3357 }
3358 
3359 __printf(3, 0)
3360 int trace_array_vprintk(struct trace_array *tr,
3361 			unsigned long ip, const char *fmt, va_list args)
3362 {
3363 	if (tracing_selftest_running && tr == &global_trace)
3364 		return 0;
3365 
3366 	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3367 }
3368 
3369 /**
3370  * trace_array_printk - Print a message to a specific instance
3371  * @tr: The instance trace_array descriptor
3372  * @ip: The instruction pointer that this is called from.
3373  * @fmt: The format to print (printf format)
3374  *
3375  * If a subsystem sets up its own instance, they have the right to
3376  * printk strings into their tracing instance buffer using this
3377  * function. Note, this function will not write into the top level
3378  * buffer (use trace_printk() for that), as writing into the top level
3379  * buffer should only have events that can be individually disabled.
3380  * trace_printk() is only used for debugging a kernel, and should not
3381  * be ever incorporated in normal use.
3382  *
3383  * trace_array_printk() can be used, as it will not add noise to the
3384  * top level tracing buffer.
3385  *
3386  * Note, trace_array_init_printk() must be called on @tr before this
3387  * can be used.
3388  */
3389 __printf(3, 0)
3390 int trace_array_printk(struct trace_array *tr,
3391 		       unsigned long ip, const char *fmt, ...)
3392 {
3393 	int ret;
3394 	va_list ap;
3395 
3396 	if (!tr)
3397 		return -ENOENT;
3398 
3399 	/* This is only allowed for created instances */
3400 	if (tr == &global_trace)
3401 		return 0;
3402 
3403 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3404 		return 0;
3405 
3406 	va_start(ap, fmt);
3407 	ret = trace_array_vprintk(tr, ip, fmt, ap);
3408 	va_end(ap);
3409 	return ret;
3410 }
3411 EXPORT_SYMBOL_GPL(trace_array_printk);
3412 
3413 /**
3414  * trace_array_init_printk - Initialize buffers for trace_array_printk()
3415  * @tr: The trace array to initialize the buffers for
3416  *
3417  * As trace_array_printk() only writes into instances, they are OK to
3418  * have in the kernel (unlike trace_printk()). This needs to be called
3419  * before trace_array_printk() can be used on a trace_array.
3420  */
3421 int trace_array_init_printk(struct trace_array *tr)
3422 {
3423 	if (!tr)
3424 		return -ENOENT;
3425 
3426 	/* This is only allowed for created instances */
3427 	if (tr == &global_trace)
3428 		return -EINVAL;
3429 
3430 	return alloc_percpu_trace_buffer();
3431 }
3432 EXPORT_SYMBOL_GPL(trace_array_init_printk);
3433 
3434 __printf(3, 4)
3435 int trace_array_printk_buf(struct trace_buffer *buffer,
3436 			   unsigned long ip, const char *fmt, ...)
3437 {
3438 	int ret;
3439 	va_list ap;
3440 
3441 	if (!(printk_trace->trace_flags & TRACE_ITER_PRINTK))
3442 		return 0;
3443 
3444 	va_start(ap, fmt);
3445 	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3446 	va_end(ap);
3447 	return ret;
3448 }
3449 
3450 __printf(2, 0)
3451 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3452 {
3453 	return trace_array_vprintk(printk_trace, ip, fmt, args);
3454 }
3455 EXPORT_SYMBOL_GPL(trace_vprintk);
3456 
3457 static void trace_iterator_increment(struct trace_iterator *iter)
3458 {
3459 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3460 
3461 	iter->idx++;
3462 	if (buf_iter)
3463 		ring_buffer_iter_advance(buf_iter);
3464 }
3465 
3466 static struct trace_entry *
3467 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3468 		unsigned long *lost_events)
3469 {
3470 	struct ring_buffer_event *event;
3471 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3472 
3473 	if (buf_iter) {
3474 		event = ring_buffer_iter_peek(buf_iter, ts);
3475 		if (lost_events)
3476 			*lost_events = ring_buffer_iter_dropped(buf_iter) ?
3477 				(unsigned long)-1 : 0;
3478 	} else {
3479 		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3480 					 lost_events);
3481 	}
3482 
3483 	if (event) {
3484 		iter->ent_size = ring_buffer_event_length(event);
3485 		return ring_buffer_event_data(event);
3486 	}
3487 	iter->ent_size = 0;
3488 	return NULL;
3489 }
3490 
3491 static struct trace_entry *
3492 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3493 		  unsigned long *missing_events, u64 *ent_ts)
3494 {
3495 	struct trace_buffer *buffer = iter->array_buffer->buffer;
3496 	struct trace_entry *ent, *next = NULL;
3497 	unsigned long lost_events = 0, next_lost = 0;
3498 	int cpu_file = iter->cpu_file;
3499 	u64 next_ts = 0, ts;
3500 	int next_cpu = -1;
3501 	int next_size = 0;
3502 	int cpu;
3503 
3504 	/*
3505 	 * If we are in a per_cpu trace file, don't bother by iterating over
3506 	 * all cpu and peek directly.
3507 	 */
3508 	if (cpu_file > RING_BUFFER_ALL_CPUS) {
3509 		if (ring_buffer_empty_cpu(buffer, cpu_file))
3510 			return NULL;
3511 		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3512 		if (ent_cpu)
3513 			*ent_cpu = cpu_file;
3514 
3515 		return ent;
3516 	}
3517 
3518 	for_each_tracing_cpu(cpu) {
3519 
3520 		if (ring_buffer_empty_cpu(buffer, cpu))
3521 			continue;
3522 
3523 		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3524 
3525 		/*
3526 		 * Pick the entry with the smallest timestamp:
3527 		 */
3528 		if (ent && (!next || ts < next_ts)) {
3529 			next = ent;
3530 			next_cpu = cpu;
3531 			next_ts = ts;
3532 			next_lost = lost_events;
3533 			next_size = iter->ent_size;
3534 		}
3535 	}
3536 
3537 	iter->ent_size = next_size;
3538 
3539 	if (ent_cpu)
3540 		*ent_cpu = next_cpu;
3541 
3542 	if (ent_ts)
3543 		*ent_ts = next_ts;
3544 
3545 	if (missing_events)
3546 		*missing_events = next_lost;
3547 
3548 	return next;
3549 }
3550 
3551 #define STATIC_FMT_BUF_SIZE	128
3552 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3553 
3554 char *trace_iter_expand_format(struct trace_iterator *iter)
3555 {
3556 	char *tmp;
3557 
3558 	/*
3559 	 * iter->tr is NULL when used with tp_printk, which makes
3560 	 * this get called where it is not safe to call krealloc().
3561 	 */
3562 	if (!iter->tr || iter->fmt == static_fmt_buf)
3563 		return NULL;
3564 
3565 	tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3566 		       GFP_KERNEL);
3567 	if (tmp) {
3568 		iter->fmt_size += STATIC_FMT_BUF_SIZE;
3569 		iter->fmt = tmp;
3570 	}
3571 
3572 	return tmp;
3573 }
3574 
3575 /* Returns true if the string is safe to dereference from an event */
3576 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3577 			   bool star, int len)
3578 {
3579 	unsigned long addr = (unsigned long)str;
3580 	struct trace_event *trace_event;
3581 	struct trace_event_call *event;
3582 
3583 	/* Ignore strings with no length */
3584 	if (star && !len)
3585 		return true;
3586 
3587 	/* OK if part of the event data */
3588 	if ((addr >= (unsigned long)iter->ent) &&
3589 	    (addr < (unsigned long)iter->ent + iter->ent_size))
3590 		return true;
3591 
3592 	/* OK if part of the temp seq buffer */
3593 	if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3594 	    (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
3595 		return true;
3596 
3597 	/* Core rodata can not be freed */
3598 	if (is_kernel_rodata(addr))
3599 		return true;
3600 
3601 	if (trace_is_tracepoint_string(str))
3602 		return true;
3603 
3604 	/*
3605 	 * Now this could be a module event, referencing core module
3606 	 * data, which is OK.
3607 	 */
3608 	if (!iter->ent)
3609 		return false;
3610 
3611 	trace_event = ftrace_find_event(iter->ent->type);
3612 	if (!trace_event)
3613 		return false;
3614 
3615 	event = container_of(trace_event, struct trace_event_call, event);
3616 	if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3617 		return false;
3618 
3619 	/* Would rather have rodata, but this will suffice */
3620 	if (within_module_core(addr, event->module))
3621 		return true;
3622 
3623 	return false;
3624 }
3625 
3626 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3627 
3628 static int test_can_verify_check(const char *fmt, ...)
3629 {
3630 	char buf[16];
3631 	va_list ap;
3632 	int ret;
3633 
3634 	/*
3635 	 * The verifier is dependent on vsnprintf() modifies the va_list
3636 	 * passed to it, where it is sent as a reference. Some architectures
3637 	 * (like x86_32) passes it by value, which means that vsnprintf()
3638 	 * does not modify the va_list passed to it, and the verifier
3639 	 * would then need to be able to understand all the values that
3640 	 * vsnprintf can use. If it is passed by value, then the verifier
3641 	 * is disabled.
3642 	 */
3643 	va_start(ap, fmt);
3644 	vsnprintf(buf, 16, "%d", ap);
3645 	ret = va_arg(ap, int);
3646 	va_end(ap);
3647 
3648 	return ret;
3649 }
3650 
3651 static void test_can_verify(void)
3652 {
3653 	if (!test_can_verify_check("%d %d", 0, 1)) {
3654 		pr_info("trace event string verifier disabled\n");
3655 		static_branch_inc(&trace_no_verify);
3656 	}
3657 }
3658 
3659 /**
3660  * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3661  * @iter: The iterator that holds the seq buffer and the event being printed
3662  * @fmt: The format used to print the event
3663  * @ap: The va_list holding the data to print from @fmt.
3664  *
3665  * This writes the data into the @iter->seq buffer using the data from
3666  * @fmt and @ap. If the format has a %s, then the source of the string
3667  * is examined to make sure it is safe to print, otherwise it will
3668  * warn and print "[UNSAFE MEMORY]" in place of the dereferenced string
3669  * pointer.
3670  */
3671 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3672 			 va_list ap)
3673 {
3674 	long text_delta = 0;
3675 	long data_delta = 0;
3676 	const char *p = fmt;
3677 	const char *str;
3678 	bool good;
3679 	int i, j;
3680 
3681 	if (WARN_ON_ONCE(!fmt))
3682 		return;
3683 
3684 	if (static_branch_unlikely(&trace_no_verify))
3685 		goto print;
3686 
3687 	/*
3688 	 * When the kernel is booted with the tp_printk command line
3689 	 * parameter, trace events go directly through to printk().
3690 	 * It also is checked by this function, but it does not
3691 	 * have an associated trace_array (tr) for it.
3692 	 */
3693 	if (iter->tr) {
3694 		text_delta = iter->tr->text_delta;
3695 		data_delta = iter->tr->data_delta;
3696 	}
3697 
3698 	/* Don't bother checking when doing a ftrace_dump() */
3699 	if (iter->fmt == static_fmt_buf)
3700 		goto print;
3701 
3702 	while (*p) {
3703 		bool star = false;
3704 		int len = 0;
3705 
3706 		j = 0;
3707 
3708 		/*
3709 		 * We only care about %s and variants
3710 		 * as well as %p[sS] if delta is non-zero
3711 		 */
3712 		for (i = 0; p[i]; i++) {
3713 			if (i + 1 >= iter->fmt_size) {
3714 				/*
3715 				 * If we can't expand the copy buffer,
3716 				 * just print it.
3717 				 */
3718 				if (!trace_iter_expand_format(iter))
3719 					goto print;
3720 			}
3721 
3722 			if (p[i] == '\\' && p[i+1]) {
3723 				i++;
3724 				continue;
3725 			}
3726 			if (p[i] == '%') {
3727 				/* Need to test cases like %08.*s */
3728 				for (j = 1; p[i+j]; j++) {
3729 					if (isdigit(p[i+j]) ||
3730 					    p[i+j] == '.')
3731 						continue;
3732 					if (p[i+j] == '*') {
3733 						star = true;
3734 						continue;
3735 					}
3736 					break;
3737 				}
3738 				if (p[i+j] == 's')
3739 					break;
3740 
3741 				if (text_delta && p[i+1] == 'p' &&
3742 				    ((p[i+2] == 's' || p[i+2] == 'S')))
3743 					break;
3744 
3745 				star = false;
3746 			}
3747 			j = 0;
3748 		}
3749 		/* If no %s found then just print normally */
3750 		if (!p[i])
3751 			break;
3752 
3753 		/* Copy up to the %s, and print that */
3754 		strncpy(iter->fmt, p, i);
3755 		iter->fmt[i] = '\0';
3756 		trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3757 
3758 		/* Add delta to %pS pointers */
3759 		if (p[i+1] == 'p') {
3760 			unsigned long addr;
3761 			char fmt[4];
3762 
3763 			fmt[0] = '%';
3764 			fmt[1] = 'p';
3765 			fmt[2] = p[i+2]; /* Either %ps or %pS */
3766 			fmt[3] = '\0';
3767 
3768 			addr = va_arg(ap, unsigned long);
3769 			addr += text_delta;
3770 			trace_seq_printf(&iter->seq, fmt, (void *)addr);
3771 
3772 			p += i + 3;
3773 			continue;
3774 		}
3775 
3776 		/*
3777 		 * If iter->seq is full, the above call no longer guarantees
3778 		 * that ap is in sync with fmt processing, and further calls
3779 		 * to va_arg() can return wrong positional arguments.
3780 		 *
3781 		 * Ensure that ap is no longer used in this case.
3782 		 */
3783 		if (iter->seq.full) {
3784 			p = "";
3785 			break;
3786 		}
3787 
3788 		if (star)
3789 			len = va_arg(ap, int);
3790 
3791 		/* The ap now points to the string data of the %s */
3792 		str = va_arg(ap, const char *);
3793 
3794 		good = trace_safe_str(iter, str, star, len);
3795 
3796 		/* Could be from the last boot */
3797 		if (data_delta && !good) {
3798 			str += data_delta;
3799 			good = trace_safe_str(iter, str, star, len);
3800 		}
3801 
3802 		/*
3803 		 * If you hit this warning, it is likely that the
3804 		 * trace event in question used %s on a string that
3805 		 * was saved at the time of the event, but may not be
3806 		 * around when the trace is read. Use __string(),
3807 		 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3808 		 * instead. See samples/trace_events/trace-events-sample.h
3809 		 * for reference.
3810 		 */
3811 		if (WARN_ONCE(!good, "fmt: '%s' current_buffer: '%s'",
3812 			      fmt, seq_buf_str(&iter->seq.seq))) {
3813 			int ret;
3814 
3815 			/* Try to safely read the string */
3816 			if (star) {
3817 				if (len + 1 > iter->fmt_size)
3818 					len = iter->fmt_size - 1;
3819 				if (len < 0)
3820 					len = 0;
3821 				ret = copy_from_kernel_nofault(iter->fmt, str, len);
3822 				iter->fmt[len] = 0;
3823 				star = false;
3824 			} else {
3825 				ret = strncpy_from_kernel_nofault(iter->fmt, str,
3826 								  iter->fmt_size);
3827 			}
3828 			if (ret < 0)
3829 				trace_seq_printf(&iter->seq, "(0x%px)", str);
3830 			else
3831 				trace_seq_printf(&iter->seq, "(0x%px:%s)",
3832 						 str, iter->fmt);
3833 			str = "[UNSAFE-MEMORY]";
3834 			strcpy(iter->fmt, "%s");
3835 		} else {
3836 			strncpy(iter->fmt, p + i, j + 1);
3837 			iter->fmt[j+1] = '\0';
3838 		}
3839 		if (star)
3840 			trace_seq_printf(&iter->seq, iter->fmt, len, str);
3841 		else
3842 			trace_seq_printf(&iter->seq, iter->fmt, str);
3843 
3844 		p += i + j + 1;
3845 	}
3846  print:
3847 	if (*p)
3848 		trace_seq_vprintf(&iter->seq, p, ap);
3849 }
3850 
3851 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3852 {
3853 	const char *p, *new_fmt;
3854 	char *q;
3855 
3856 	if (WARN_ON_ONCE(!fmt))
3857 		return fmt;
3858 
3859 	if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3860 		return fmt;
3861 
3862 	p = fmt;
3863 	new_fmt = q = iter->fmt;
3864 	while (*p) {
3865 		if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3866 			if (!trace_iter_expand_format(iter))
3867 				return fmt;
3868 
3869 			q += iter->fmt - new_fmt;
3870 			new_fmt = iter->fmt;
3871 		}
3872 
3873 		*q++ = *p++;
3874 
3875 		/* Replace %p with %px */
3876 		if (p[-1] == '%') {
3877 			if (p[0] == '%') {
3878 				*q++ = *p++;
3879 			} else if (p[0] == 'p' && !isalnum(p[1])) {
3880 				*q++ = *p++;
3881 				*q++ = 'x';
3882 			}
3883 		}
3884 	}
3885 	*q = '\0';
3886 
3887 	return new_fmt;
3888 }
3889 
3890 #define STATIC_TEMP_BUF_SIZE	128
3891 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3892 
3893 /* Find the next real entry, without updating the iterator itself */
3894 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3895 					  int *ent_cpu, u64 *ent_ts)
3896 {
3897 	/* __find_next_entry will reset ent_size */
3898 	int ent_size = iter->ent_size;
3899 	struct trace_entry *entry;
3900 
3901 	/*
3902 	 * If called from ftrace_dump(), then the iter->temp buffer
3903 	 * will be the static_temp_buf and not created from kmalloc.
3904 	 * If the entry size is greater than the buffer, we can
3905 	 * not save it. Just return NULL in that case. This is only
3906 	 * used to add markers when two consecutive events' time
3907 	 * stamps have a large delta. See trace_print_lat_context()
3908 	 */
3909 	if (iter->temp == static_temp_buf &&
3910 	    STATIC_TEMP_BUF_SIZE < ent_size)
3911 		return NULL;
3912 
3913 	/*
3914 	 * The __find_next_entry() may call peek_next_entry(), which may
3915 	 * call ring_buffer_peek() that may make the contents of iter->ent
3916 	 * undefined. Need to copy iter->ent now.
3917 	 */
3918 	if (iter->ent && iter->ent != iter->temp) {
3919 		if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3920 		    !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3921 			void *temp;
3922 			temp = kmalloc(iter->ent_size, GFP_KERNEL);
3923 			if (!temp)
3924 				return NULL;
3925 			kfree(iter->temp);
3926 			iter->temp = temp;
3927 			iter->temp_size = iter->ent_size;
3928 		}
3929 		memcpy(iter->temp, iter->ent, iter->ent_size);
3930 		iter->ent = iter->temp;
3931 	}
3932 	entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3933 	/* Put back the original ent_size */
3934 	iter->ent_size = ent_size;
3935 
3936 	return entry;
3937 }
3938 
3939 /* Find the next real entry, and increment the iterator to the next entry */
3940 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3941 {
3942 	iter->ent = __find_next_entry(iter, &iter->cpu,
3943 				      &iter->lost_events, &iter->ts);
3944 
3945 	if (iter->ent)
3946 		trace_iterator_increment(iter);
3947 
3948 	return iter->ent ? iter : NULL;
3949 }
3950 
3951 static void trace_consume(struct trace_iterator *iter)
3952 {
3953 	ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3954 			    &iter->lost_events);
3955 }
3956 
3957 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3958 {
3959 	struct trace_iterator *iter = m->private;
3960 	int i = (int)*pos;
3961 	void *ent;
3962 
3963 	WARN_ON_ONCE(iter->leftover);
3964 
3965 	(*pos)++;
3966 
3967 	/* can't go backwards */
3968 	if (iter->idx > i)
3969 		return NULL;
3970 
3971 	if (iter->idx < 0)
3972 		ent = trace_find_next_entry_inc(iter);
3973 	else
3974 		ent = iter;
3975 
3976 	while (ent && iter->idx < i)
3977 		ent = trace_find_next_entry_inc(iter);
3978 
3979 	iter->pos = *pos;
3980 
3981 	return ent;
3982 }
3983 
3984 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3985 {
3986 	struct ring_buffer_iter *buf_iter;
3987 	unsigned long entries = 0;
3988 	u64 ts;
3989 
3990 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3991 
3992 	buf_iter = trace_buffer_iter(iter, cpu);
3993 	if (!buf_iter)
3994 		return;
3995 
3996 	ring_buffer_iter_reset(buf_iter);
3997 
3998 	/*
3999 	 * We could have the case with the max latency tracers
4000 	 * that a reset never took place on a cpu. This is evident
4001 	 * by the timestamp being before the start of the buffer.
4002 	 */
4003 	while (ring_buffer_iter_peek(buf_iter, &ts)) {
4004 		if (ts >= iter->array_buffer->time_start)
4005 			break;
4006 		entries++;
4007 		ring_buffer_iter_advance(buf_iter);
4008 		/* This could be a big loop */
4009 		cond_resched();
4010 	}
4011 
4012 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4013 }
4014 
4015 /*
4016  * The current tracer is copied to avoid a global locking
4017  * all around.
4018  */
4019 static void *s_start(struct seq_file *m, loff_t *pos)
4020 {
4021 	struct trace_iterator *iter = m->private;
4022 	struct trace_array *tr = iter->tr;
4023 	int cpu_file = iter->cpu_file;
4024 	void *p = NULL;
4025 	loff_t l = 0;
4026 	int cpu;
4027 
4028 	mutex_lock(&trace_types_lock);
4029 	if (unlikely(tr->current_trace != iter->trace)) {
4030 		/* Close iter->trace before switching to the new current tracer */
4031 		if (iter->trace->close)
4032 			iter->trace->close(iter);
4033 		iter->trace = tr->current_trace;
4034 		/* Reopen the new current tracer */
4035 		if (iter->trace->open)
4036 			iter->trace->open(iter);
4037 	}
4038 	mutex_unlock(&trace_types_lock);
4039 
4040 #ifdef CONFIG_TRACER_MAX_TRACE
4041 	if (iter->snapshot && iter->trace->use_max_tr)
4042 		return ERR_PTR(-EBUSY);
4043 #endif
4044 
4045 	if (*pos != iter->pos) {
4046 		iter->ent = NULL;
4047 		iter->cpu = 0;
4048 		iter->idx = -1;
4049 
4050 		if (cpu_file == RING_BUFFER_ALL_CPUS) {
4051 			for_each_tracing_cpu(cpu)
4052 				tracing_iter_reset(iter, cpu);
4053 		} else
4054 			tracing_iter_reset(iter, cpu_file);
4055 
4056 		iter->leftover = 0;
4057 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4058 			;
4059 
4060 	} else {
4061 		/*
4062 		 * If we overflowed the seq_file before, then we want
4063 		 * to just reuse the trace_seq buffer again.
4064 		 */
4065 		if (iter->leftover)
4066 			p = iter;
4067 		else {
4068 			l = *pos - 1;
4069 			p = s_next(m, p, &l);
4070 		}
4071 	}
4072 
4073 	trace_event_read_lock();
4074 	trace_access_lock(cpu_file);
4075 	return p;
4076 }
4077 
4078 static void s_stop(struct seq_file *m, void *p)
4079 {
4080 	struct trace_iterator *iter = m->private;
4081 
4082 #ifdef CONFIG_TRACER_MAX_TRACE
4083 	if (iter->snapshot && iter->trace->use_max_tr)
4084 		return;
4085 #endif
4086 
4087 	trace_access_unlock(iter->cpu_file);
4088 	trace_event_read_unlock();
4089 }
4090 
4091 static void
4092 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4093 		      unsigned long *entries, int cpu)
4094 {
4095 	unsigned long count;
4096 
4097 	count = ring_buffer_entries_cpu(buf->buffer, cpu);
4098 	/*
4099 	 * If this buffer has skipped entries, then we hold all
4100 	 * entries for the trace and we need to ignore the
4101 	 * ones before the time stamp.
4102 	 */
4103 	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4104 		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4105 		/* total is the same as the entries */
4106 		*total = count;
4107 	} else
4108 		*total = count +
4109 			ring_buffer_overrun_cpu(buf->buffer, cpu);
4110 	*entries = count;
4111 }
4112 
4113 static void
4114 get_total_entries(struct array_buffer *buf,
4115 		  unsigned long *total, unsigned long *entries)
4116 {
4117 	unsigned long t, e;
4118 	int cpu;
4119 
4120 	*total = 0;
4121 	*entries = 0;
4122 
4123 	for_each_tracing_cpu(cpu) {
4124 		get_total_entries_cpu(buf, &t, &e, cpu);
4125 		*total += t;
4126 		*entries += e;
4127 	}
4128 }
4129 
4130 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4131 {
4132 	unsigned long total, entries;
4133 
4134 	if (!tr)
4135 		tr = &global_trace;
4136 
4137 	get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4138 
4139 	return entries;
4140 }
4141 
4142 unsigned long trace_total_entries(struct trace_array *tr)
4143 {
4144 	unsigned long total, entries;
4145 
4146 	if (!tr)
4147 		tr = &global_trace;
4148 
4149 	get_total_entries(&tr->array_buffer, &total, &entries);
4150 
4151 	return entries;
4152 }
4153 
4154 static void print_lat_help_header(struct seq_file *m)
4155 {
4156 	seq_puts(m, "#                    _------=> CPU#            \n"
4157 		    "#                   / _-----=> irqs-off/BH-disabled\n"
4158 		    "#                  | / _----=> need-resched    \n"
4159 		    "#                  || / _---=> hardirq/softirq \n"
4160 		    "#                  ||| / _--=> preempt-depth   \n"
4161 		    "#                  |||| / _-=> migrate-disable \n"
4162 		    "#                  ||||| /     delay           \n"
4163 		    "#  cmd     pid     |||||| time  |   caller     \n"
4164 		    "#     \\   /        ||||||  \\    |    /       \n");
4165 }
4166 
4167 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4168 {
4169 	unsigned long total;
4170 	unsigned long entries;
4171 
4172 	get_total_entries(buf, &total, &entries);
4173 	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
4174 		   entries, total, num_online_cpus());
4175 	seq_puts(m, "#\n");
4176 }
4177 
4178 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4179 				   unsigned int flags)
4180 {
4181 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
4182 
4183 	print_event_info(buf, m);
4184 
4185 	seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
4186 	seq_printf(m, "#              | |      %s   |         |         |\n",      tgid ? "     |    " : "");
4187 }
4188 
4189 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4190 				       unsigned int flags)
4191 {
4192 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
4193 	static const char space[] = "            ";
4194 	int prec = tgid ? 12 : 2;
4195 
4196 	print_event_info(buf, m);
4197 
4198 	seq_printf(m, "#                            %.*s  _-----=> irqs-off/BH-disabled\n", prec, space);
4199 	seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
4200 	seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
4201 	seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
4202 	seq_printf(m, "#                            %.*s||| / _-=> migrate-disable\n", prec, space);
4203 	seq_printf(m, "#                            %.*s|||| /     delay\n", prec, space);
4204 	seq_printf(m, "#           TASK-PID  %.*s CPU#  |||||  TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
4205 	seq_printf(m, "#              | |    %.*s   |   |||||     |         |\n", prec, "       |    ");
4206 }
4207 
4208 void
4209 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4210 {
4211 	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4212 	struct array_buffer *buf = iter->array_buffer;
4213 	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4214 	struct tracer *type = iter->trace;
4215 	unsigned long entries;
4216 	unsigned long total;
4217 	const char *name = type->name;
4218 
4219 	get_total_entries(buf, &total, &entries);
4220 
4221 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4222 		   name, init_utsname()->release);
4223 	seq_puts(m, "# -----------------------------------"
4224 		 "---------------------------------\n");
4225 	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4226 		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4227 		   nsecs_to_usecs(data->saved_latency),
4228 		   entries,
4229 		   total,
4230 		   buf->cpu,
4231 		   preempt_model_none()      ? "server" :
4232 		   preempt_model_voluntary() ? "desktop" :
4233 		   preempt_model_full()      ? "preempt" :
4234 		   preempt_model_rt()        ? "preempt_rt" :
4235 		   "unknown",
4236 		   /* These are reserved for later use */
4237 		   0, 0, 0, 0);
4238 #ifdef CONFIG_SMP
4239 	seq_printf(m, " #P:%d)\n", num_online_cpus());
4240 #else
4241 	seq_puts(m, ")\n");
4242 #endif
4243 	seq_puts(m, "#    -----------------\n");
4244 	seq_printf(m, "#    | task: %.16s-%d "
4245 		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4246 		   data->comm, data->pid,
4247 		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4248 		   data->policy, data->rt_priority);
4249 	seq_puts(m, "#    -----------------\n");
4250 
4251 	if (data->critical_start) {
4252 		seq_puts(m, "#  => started at: ");
4253 		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4254 		trace_print_seq(m, &iter->seq);
4255 		seq_puts(m, "\n#  => ended at:   ");
4256 		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4257 		trace_print_seq(m, &iter->seq);
4258 		seq_puts(m, "\n#\n");
4259 	}
4260 
4261 	seq_puts(m, "#\n");
4262 }
4263 
4264 static void test_cpu_buff_start(struct trace_iterator *iter)
4265 {
4266 	struct trace_seq *s = &iter->seq;
4267 	struct trace_array *tr = iter->tr;
4268 
4269 	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4270 		return;
4271 
4272 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4273 		return;
4274 
4275 	if (cpumask_available(iter->started) &&
4276 	    cpumask_test_cpu(iter->cpu, iter->started))
4277 		return;
4278 
4279 	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4280 		return;
4281 
4282 	if (cpumask_available(iter->started))
4283 		cpumask_set_cpu(iter->cpu, iter->started);
4284 
4285 	/* Don't print started cpu buffer for the first entry of the trace */
4286 	if (iter->idx > 1)
4287 		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4288 				iter->cpu);
4289 }
4290 
4291 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4292 {
4293 	struct trace_array *tr = iter->tr;
4294 	struct trace_seq *s = &iter->seq;
4295 	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4296 	struct trace_entry *entry;
4297 	struct trace_event *event;
4298 
4299 	entry = iter->ent;
4300 
4301 	test_cpu_buff_start(iter);
4302 
4303 	event = ftrace_find_event(entry->type);
4304 
4305 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4306 		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4307 			trace_print_lat_context(iter);
4308 		else
4309 			trace_print_context(iter);
4310 	}
4311 
4312 	if (trace_seq_has_overflowed(s))
4313 		return TRACE_TYPE_PARTIAL_LINE;
4314 
4315 	if (event) {
4316 		if (tr->trace_flags & TRACE_ITER_FIELDS)
4317 			return print_event_fields(iter, event);
4318 		return event->funcs->trace(iter, sym_flags, event);
4319 	}
4320 
4321 	trace_seq_printf(s, "Unknown type %d\n", entry->type);
4322 
4323 	return trace_handle_return(s);
4324 }
4325 
4326 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4327 {
4328 	struct trace_array *tr = iter->tr;
4329 	struct trace_seq *s = &iter->seq;
4330 	struct trace_entry *entry;
4331 	struct trace_event *event;
4332 
4333 	entry = iter->ent;
4334 
4335 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4336 		trace_seq_printf(s, "%d %d %llu ",
4337 				 entry->pid, iter->cpu, iter->ts);
4338 
4339 	if (trace_seq_has_overflowed(s))
4340 		return TRACE_TYPE_PARTIAL_LINE;
4341 
4342 	event = ftrace_find_event(entry->type);
4343 	if (event)
4344 		return event->funcs->raw(iter, 0, event);
4345 
4346 	trace_seq_printf(s, "%d ?\n", entry->type);
4347 
4348 	return trace_handle_return(s);
4349 }
4350 
4351 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4352 {
4353 	struct trace_array *tr = iter->tr;
4354 	struct trace_seq *s = &iter->seq;
4355 	unsigned char newline = '\n';
4356 	struct trace_entry *entry;
4357 	struct trace_event *event;
4358 
4359 	entry = iter->ent;
4360 
4361 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4362 		SEQ_PUT_HEX_FIELD(s, entry->pid);
4363 		SEQ_PUT_HEX_FIELD(s, iter->cpu);
4364 		SEQ_PUT_HEX_FIELD(s, iter->ts);
4365 		if (trace_seq_has_overflowed(s))
4366 			return TRACE_TYPE_PARTIAL_LINE;
4367 	}
4368 
4369 	event = ftrace_find_event(entry->type);
4370 	if (event) {
4371 		enum print_line_t ret = event->funcs->hex(iter, 0, event);
4372 		if (ret != TRACE_TYPE_HANDLED)
4373 			return ret;
4374 	}
4375 
4376 	SEQ_PUT_FIELD(s, newline);
4377 
4378 	return trace_handle_return(s);
4379 }
4380 
4381 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4382 {
4383 	struct trace_array *tr = iter->tr;
4384 	struct trace_seq *s = &iter->seq;
4385 	struct trace_entry *entry;
4386 	struct trace_event *event;
4387 
4388 	entry = iter->ent;
4389 
4390 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4391 		SEQ_PUT_FIELD(s, entry->pid);
4392 		SEQ_PUT_FIELD(s, iter->cpu);
4393 		SEQ_PUT_FIELD(s, iter->ts);
4394 		if (trace_seq_has_overflowed(s))
4395 			return TRACE_TYPE_PARTIAL_LINE;
4396 	}
4397 
4398 	event = ftrace_find_event(entry->type);
4399 	return event ? event->funcs->binary(iter, 0, event) :
4400 		TRACE_TYPE_HANDLED;
4401 }
4402 
4403 int trace_empty(struct trace_iterator *iter)
4404 {
4405 	struct ring_buffer_iter *buf_iter;
4406 	int cpu;
4407 
4408 	/* If we are looking at one CPU buffer, only check that one */
4409 	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4410 		cpu = iter->cpu_file;
4411 		buf_iter = trace_buffer_iter(iter, cpu);
4412 		if (buf_iter) {
4413 			if (!ring_buffer_iter_empty(buf_iter))
4414 				return 0;
4415 		} else {
4416 			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4417 				return 0;
4418 		}
4419 		return 1;
4420 	}
4421 
4422 	for_each_tracing_cpu(cpu) {
4423 		buf_iter = trace_buffer_iter(iter, cpu);
4424 		if (buf_iter) {
4425 			if (!ring_buffer_iter_empty(buf_iter))
4426 				return 0;
4427 		} else {
4428 			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4429 				return 0;
4430 		}
4431 	}
4432 
4433 	return 1;
4434 }
4435 
4436 /*  Called with trace_event_read_lock() held. */
4437 enum print_line_t print_trace_line(struct trace_iterator *iter)
4438 {
4439 	struct trace_array *tr = iter->tr;
4440 	unsigned long trace_flags = tr->trace_flags;
4441 	enum print_line_t ret;
4442 
4443 	if (iter->lost_events) {
4444 		if (iter->lost_events == (unsigned long)-1)
4445 			trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4446 					 iter->cpu);
4447 		else
4448 			trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4449 					 iter->cpu, iter->lost_events);
4450 		if (trace_seq_has_overflowed(&iter->seq))
4451 			return TRACE_TYPE_PARTIAL_LINE;
4452 	}
4453 
4454 	if (iter->trace && iter->trace->print_line) {
4455 		ret = iter->trace->print_line(iter);
4456 		if (ret != TRACE_TYPE_UNHANDLED)
4457 			return ret;
4458 	}
4459 
4460 	if (iter->ent->type == TRACE_BPUTS &&
4461 			trace_flags & TRACE_ITER_PRINTK &&
4462 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4463 		return trace_print_bputs_msg_only(iter);
4464 
4465 	if (iter->ent->type == TRACE_BPRINT &&
4466 			trace_flags & TRACE_ITER_PRINTK &&
4467 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4468 		return trace_print_bprintk_msg_only(iter);
4469 
4470 	if (iter->ent->type == TRACE_PRINT &&
4471 			trace_flags & TRACE_ITER_PRINTK &&
4472 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4473 		return trace_print_printk_msg_only(iter);
4474 
4475 	if (trace_flags & TRACE_ITER_BIN)
4476 		return print_bin_fmt(iter);
4477 
4478 	if (trace_flags & TRACE_ITER_HEX)
4479 		return print_hex_fmt(iter);
4480 
4481 	if (trace_flags & TRACE_ITER_RAW)
4482 		return print_raw_fmt(iter);
4483 
4484 	return print_trace_fmt(iter);
4485 }
4486 
4487 void trace_latency_header(struct seq_file *m)
4488 {
4489 	struct trace_iterator *iter = m->private;
4490 	struct trace_array *tr = iter->tr;
4491 
4492 	/* print nothing if the buffers are empty */
4493 	if (trace_empty(iter))
4494 		return;
4495 
4496 	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4497 		print_trace_header(m, iter);
4498 
4499 	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4500 		print_lat_help_header(m);
4501 }
4502 
4503 void trace_default_header(struct seq_file *m)
4504 {
4505 	struct trace_iterator *iter = m->private;
4506 	struct trace_array *tr = iter->tr;
4507 	unsigned long trace_flags = tr->trace_flags;
4508 
4509 	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4510 		return;
4511 
4512 	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4513 		/* print nothing if the buffers are empty */
4514 		if (trace_empty(iter))
4515 			return;
4516 		print_trace_header(m, iter);
4517 		if (!(trace_flags & TRACE_ITER_VERBOSE))
4518 			print_lat_help_header(m);
4519 	} else {
4520 		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4521 			if (trace_flags & TRACE_ITER_IRQ_INFO)
4522 				print_func_help_header_irq(iter->array_buffer,
4523 							   m, trace_flags);
4524 			else
4525 				print_func_help_header(iter->array_buffer, m,
4526 						       trace_flags);
4527 		}
4528 	}
4529 }
4530 
4531 static void test_ftrace_alive(struct seq_file *m)
4532 {
4533 	if (!ftrace_is_dead())
4534 		return;
4535 	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4536 		    "#          MAY BE MISSING FUNCTION EVENTS\n");
4537 }
4538 
4539 #ifdef CONFIG_TRACER_MAX_TRACE
4540 static void show_snapshot_main_help(struct seq_file *m)
4541 {
4542 	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4543 		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4544 		    "#                      Takes a snapshot of the main buffer.\n"
4545 		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4546 		    "#                      (Doesn't have to be '2' works with any number that\n"
4547 		    "#                       is not a '0' or '1')\n");
4548 }
4549 
4550 static void show_snapshot_percpu_help(struct seq_file *m)
4551 {
4552 	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4553 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4554 	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4555 		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
4556 #else
4557 	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4558 		    "#                     Must use main snapshot file to allocate.\n");
4559 #endif
4560 	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4561 		    "#                      (Doesn't have to be '2' works with any number that\n"
4562 		    "#                       is not a '0' or '1')\n");
4563 }
4564 
4565 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4566 {
4567 	if (iter->tr->allocated_snapshot)
4568 		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4569 	else
4570 		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4571 
4572 	seq_puts(m, "# Snapshot commands:\n");
4573 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4574 		show_snapshot_main_help(m);
4575 	else
4576 		show_snapshot_percpu_help(m);
4577 }
4578 #else
4579 /* Should never be called */
4580 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4581 #endif
4582 
4583 static int s_show(struct seq_file *m, void *v)
4584 {
4585 	struct trace_iterator *iter = v;
4586 	int ret;
4587 
4588 	if (iter->ent == NULL) {
4589 		if (iter->tr) {
4590 			seq_printf(m, "# tracer: %s\n", iter->trace->name);
4591 			seq_puts(m, "#\n");
4592 			test_ftrace_alive(m);
4593 		}
4594 		if (iter->snapshot && trace_empty(iter))
4595 			print_snapshot_help(m, iter);
4596 		else if (iter->trace && iter->trace->print_header)
4597 			iter->trace->print_header(m);
4598 		else
4599 			trace_default_header(m);
4600 
4601 	} else if (iter->leftover) {
4602 		/*
4603 		 * If we filled the seq_file buffer earlier, we
4604 		 * want to just show it now.
4605 		 */
4606 		ret = trace_print_seq(m, &iter->seq);
4607 
4608 		/* ret should this time be zero, but you never know */
4609 		iter->leftover = ret;
4610 
4611 	} else {
4612 		ret = print_trace_line(iter);
4613 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
4614 			iter->seq.full = 0;
4615 			trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
4616 		}
4617 		ret = trace_print_seq(m, &iter->seq);
4618 		/*
4619 		 * If we overflow the seq_file buffer, then it will
4620 		 * ask us for this data again at start up.
4621 		 * Use that instead.
4622 		 *  ret is 0 if seq_file write succeeded.
4623 		 *        -1 otherwise.
4624 		 */
4625 		iter->leftover = ret;
4626 	}
4627 
4628 	return 0;
4629 }
4630 
4631 /*
4632  * Should be used after trace_array_get(), trace_types_lock
4633  * ensures that i_cdev was already initialized.
4634  */
4635 static inline int tracing_get_cpu(struct inode *inode)
4636 {
4637 	if (inode->i_cdev) /* See trace_create_cpu_file() */
4638 		return (long)inode->i_cdev - 1;
4639 	return RING_BUFFER_ALL_CPUS;
4640 }
4641 
4642 static const struct seq_operations tracer_seq_ops = {
4643 	.start		= s_start,
4644 	.next		= s_next,
4645 	.stop		= s_stop,
4646 	.show		= s_show,
4647 };
4648 
4649 /*
4650  * Note, as iter itself can be allocated and freed in different
4651  * ways, this function is only used to free its content, and not
4652  * the iterator itself. The only requirement to all the allocations
4653  * is that it must zero all fields (kzalloc), as freeing works with
4654  * ethier allocated content or NULL.
4655  */
4656 static void free_trace_iter_content(struct trace_iterator *iter)
4657 {
4658 	/* The fmt is either NULL, allocated or points to static_fmt_buf */
4659 	if (iter->fmt != static_fmt_buf)
4660 		kfree(iter->fmt);
4661 
4662 	kfree(iter->temp);
4663 	kfree(iter->buffer_iter);
4664 	mutex_destroy(&iter->mutex);
4665 	free_cpumask_var(iter->started);
4666 }
4667 
4668 static struct trace_iterator *
4669 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4670 {
4671 	struct trace_array *tr = inode->i_private;
4672 	struct trace_iterator *iter;
4673 	int cpu;
4674 
4675 	if (tracing_disabled)
4676 		return ERR_PTR(-ENODEV);
4677 
4678 	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4679 	if (!iter)
4680 		return ERR_PTR(-ENOMEM);
4681 
4682 	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4683 				    GFP_KERNEL);
4684 	if (!iter->buffer_iter)
4685 		goto release;
4686 
4687 	/*
4688 	 * trace_find_next_entry() may need to save off iter->ent.
4689 	 * It will place it into the iter->temp buffer. As most
4690 	 * events are less than 128, allocate a buffer of that size.
4691 	 * If one is greater, then trace_find_next_entry() will
4692 	 * allocate a new buffer to adjust for the bigger iter->ent.
4693 	 * It's not critical if it fails to get allocated here.
4694 	 */
4695 	iter->temp = kmalloc(128, GFP_KERNEL);
4696 	if (iter->temp)
4697 		iter->temp_size = 128;
4698 
4699 	/*
4700 	 * trace_event_printf() may need to modify given format
4701 	 * string to replace %p with %px so that it shows real address
4702 	 * instead of hash value. However, that is only for the event
4703 	 * tracing, other tracer may not need. Defer the allocation
4704 	 * until it is needed.
4705 	 */
4706 	iter->fmt = NULL;
4707 	iter->fmt_size = 0;
4708 
4709 	mutex_lock(&trace_types_lock);
4710 	iter->trace = tr->current_trace;
4711 
4712 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4713 		goto fail;
4714 
4715 	iter->tr = tr;
4716 
4717 #ifdef CONFIG_TRACER_MAX_TRACE
4718 	/* Currently only the top directory has a snapshot */
4719 	if (tr->current_trace->print_max || snapshot)
4720 		iter->array_buffer = &tr->max_buffer;
4721 	else
4722 #endif
4723 		iter->array_buffer = &tr->array_buffer;
4724 	iter->snapshot = snapshot;
4725 	iter->pos = -1;
4726 	iter->cpu_file = tracing_get_cpu(inode);
4727 	mutex_init(&iter->mutex);
4728 
4729 	/* Notify the tracer early; before we stop tracing. */
4730 	if (iter->trace->open)
4731 		iter->trace->open(iter);
4732 
4733 	/* Annotate start of buffers if we had overruns */
4734 	if (ring_buffer_overruns(iter->array_buffer->buffer))
4735 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
4736 
4737 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
4738 	if (trace_clocks[tr->clock_id].in_ns)
4739 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4740 
4741 	/*
4742 	 * If pause-on-trace is enabled, then stop the trace while
4743 	 * dumping, unless this is the "snapshot" file
4744 	 */
4745 	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4746 		tracing_stop_tr(tr);
4747 
4748 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4749 		for_each_tracing_cpu(cpu) {
4750 			iter->buffer_iter[cpu] =
4751 				ring_buffer_read_prepare(iter->array_buffer->buffer,
4752 							 cpu, GFP_KERNEL);
4753 		}
4754 		ring_buffer_read_prepare_sync();
4755 		for_each_tracing_cpu(cpu) {
4756 			ring_buffer_read_start(iter->buffer_iter[cpu]);
4757 			tracing_iter_reset(iter, cpu);
4758 		}
4759 	} else {
4760 		cpu = iter->cpu_file;
4761 		iter->buffer_iter[cpu] =
4762 			ring_buffer_read_prepare(iter->array_buffer->buffer,
4763 						 cpu, GFP_KERNEL);
4764 		ring_buffer_read_prepare_sync();
4765 		ring_buffer_read_start(iter->buffer_iter[cpu]);
4766 		tracing_iter_reset(iter, cpu);
4767 	}
4768 
4769 	mutex_unlock(&trace_types_lock);
4770 
4771 	return iter;
4772 
4773  fail:
4774 	mutex_unlock(&trace_types_lock);
4775 	free_trace_iter_content(iter);
4776 release:
4777 	seq_release_private(inode, file);
4778 	return ERR_PTR(-ENOMEM);
4779 }
4780 
4781 int tracing_open_generic(struct inode *inode, struct file *filp)
4782 {
4783 	int ret;
4784 
4785 	ret = tracing_check_open_get_tr(NULL);
4786 	if (ret)
4787 		return ret;
4788 
4789 	filp->private_data = inode->i_private;
4790 	return 0;
4791 }
4792 
4793 bool tracing_is_disabled(void)
4794 {
4795 	return (tracing_disabled) ? true: false;
4796 }
4797 
4798 /*
4799  * Open and update trace_array ref count.
4800  * Must have the current trace_array passed to it.
4801  */
4802 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4803 {
4804 	struct trace_array *tr = inode->i_private;
4805 	int ret;
4806 
4807 	ret = tracing_check_open_get_tr(tr);
4808 	if (ret)
4809 		return ret;
4810 
4811 	filp->private_data = inode->i_private;
4812 
4813 	return 0;
4814 }
4815 
4816 /*
4817  * The private pointer of the inode is the trace_event_file.
4818  * Update the tr ref count associated to it.
4819  */
4820 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4821 {
4822 	struct trace_event_file *file = inode->i_private;
4823 	int ret;
4824 
4825 	ret = tracing_check_open_get_tr(file->tr);
4826 	if (ret)
4827 		return ret;
4828 
4829 	mutex_lock(&event_mutex);
4830 
4831 	/* Fail if the file is marked for removal */
4832 	if (file->flags & EVENT_FILE_FL_FREED) {
4833 		trace_array_put(file->tr);
4834 		ret = -ENODEV;
4835 	} else {
4836 		event_file_get(file);
4837 	}
4838 
4839 	mutex_unlock(&event_mutex);
4840 	if (ret)
4841 		return ret;
4842 
4843 	filp->private_data = inode->i_private;
4844 
4845 	return 0;
4846 }
4847 
4848 int tracing_release_file_tr(struct inode *inode, struct file *filp)
4849 {
4850 	struct trace_event_file *file = inode->i_private;
4851 
4852 	trace_array_put(file->tr);
4853 	event_file_put(file);
4854 
4855 	return 0;
4856 }
4857 
4858 int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
4859 {
4860 	tracing_release_file_tr(inode, filp);
4861 	return single_release(inode, filp);
4862 }
4863 
4864 static int tracing_mark_open(struct inode *inode, struct file *filp)
4865 {
4866 	stream_open(inode, filp);
4867 	return tracing_open_generic_tr(inode, filp);
4868 }
4869 
4870 static int tracing_release(struct inode *inode, struct file *file)
4871 {
4872 	struct trace_array *tr = inode->i_private;
4873 	struct seq_file *m = file->private_data;
4874 	struct trace_iterator *iter;
4875 	int cpu;
4876 
4877 	if (!(file->f_mode & FMODE_READ)) {
4878 		trace_array_put(tr);
4879 		return 0;
4880 	}
4881 
4882 	/* Writes do not use seq_file */
4883 	iter = m->private;
4884 	mutex_lock(&trace_types_lock);
4885 
4886 	for_each_tracing_cpu(cpu) {
4887 		if (iter->buffer_iter[cpu])
4888 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
4889 	}
4890 
4891 	if (iter->trace && iter->trace->close)
4892 		iter->trace->close(iter);
4893 
4894 	if (!iter->snapshot && tr->stop_count)
4895 		/* reenable tracing if it was previously enabled */
4896 		tracing_start_tr(tr);
4897 
4898 	__trace_array_put(tr);
4899 
4900 	mutex_unlock(&trace_types_lock);
4901 
4902 	free_trace_iter_content(iter);
4903 	seq_release_private(inode, file);
4904 
4905 	return 0;
4906 }
4907 
4908 int tracing_release_generic_tr(struct inode *inode, struct file *file)
4909 {
4910 	struct trace_array *tr = inode->i_private;
4911 
4912 	trace_array_put(tr);
4913 	return 0;
4914 }
4915 
4916 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4917 {
4918 	struct trace_array *tr = inode->i_private;
4919 
4920 	trace_array_put(tr);
4921 
4922 	return single_release(inode, file);
4923 }
4924 
4925 static int tracing_open(struct inode *inode, struct file *file)
4926 {
4927 	struct trace_array *tr = inode->i_private;
4928 	struct trace_iterator *iter;
4929 	int ret;
4930 
4931 	ret = tracing_check_open_get_tr(tr);
4932 	if (ret)
4933 		return ret;
4934 
4935 	/* If this file was open for write, then erase contents */
4936 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4937 		int cpu = tracing_get_cpu(inode);
4938 		struct array_buffer *trace_buf = &tr->array_buffer;
4939 
4940 #ifdef CONFIG_TRACER_MAX_TRACE
4941 		if (tr->current_trace->print_max)
4942 			trace_buf = &tr->max_buffer;
4943 #endif
4944 
4945 		if (cpu == RING_BUFFER_ALL_CPUS)
4946 			tracing_reset_online_cpus(trace_buf);
4947 		else
4948 			tracing_reset_cpu(trace_buf, cpu);
4949 	}
4950 
4951 	if (file->f_mode & FMODE_READ) {
4952 		iter = __tracing_open(inode, file, false);
4953 		if (IS_ERR(iter))
4954 			ret = PTR_ERR(iter);
4955 		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4956 			iter->iter_flags |= TRACE_FILE_LAT_FMT;
4957 	}
4958 
4959 	if (ret < 0)
4960 		trace_array_put(tr);
4961 
4962 	return ret;
4963 }
4964 
4965 /*
4966  * Some tracers are not suitable for instance buffers.
4967  * A tracer is always available for the global array (toplevel)
4968  * or if it explicitly states that it is.
4969  */
4970 static bool
4971 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4972 {
4973 #ifdef CONFIG_TRACER_SNAPSHOT
4974 	/* arrays with mapped buffer range do not have snapshots */
4975 	if (tr->range_addr_start && t->use_max_tr)
4976 		return false;
4977 #endif
4978 	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4979 }
4980 
4981 /* Find the next tracer that this trace array may use */
4982 static struct tracer *
4983 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4984 {
4985 	while (t && !trace_ok_for_array(t, tr))
4986 		t = t->next;
4987 
4988 	return t;
4989 }
4990 
4991 static void *
4992 t_next(struct seq_file *m, void *v, loff_t *pos)
4993 {
4994 	struct trace_array *tr = m->private;
4995 	struct tracer *t = v;
4996 
4997 	(*pos)++;
4998 
4999 	if (t)
5000 		t = get_tracer_for_array(tr, t->next);
5001 
5002 	return t;
5003 }
5004 
5005 static void *t_start(struct seq_file *m, loff_t *pos)
5006 {
5007 	struct trace_array *tr = m->private;
5008 	struct tracer *t;
5009 	loff_t l = 0;
5010 
5011 	mutex_lock(&trace_types_lock);
5012 
5013 	t = get_tracer_for_array(tr, trace_types);
5014 	for (; t && l < *pos; t = t_next(m, t, &l))
5015 			;
5016 
5017 	return t;
5018 }
5019 
5020 static void t_stop(struct seq_file *m, void *p)
5021 {
5022 	mutex_unlock(&trace_types_lock);
5023 }
5024 
5025 static int t_show(struct seq_file *m, void *v)
5026 {
5027 	struct tracer *t = v;
5028 
5029 	if (!t)
5030 		return 0;
5031 
5032 	seq_puts(m, t->name);
5033 	if (t->next)
5034 		seq_putc(m, ' ');
5035 	else
5036 		seq_putc(m, '\n');
5037 
5038 	return 0;
5039 }
5040 
5041 static const struct seq_operations show_traces_seq_ops = {
5042 	.start		= t_start,
5043 	.next		= t_next,
5044 	.stop		= t_stop,
5045 	.show		= t_show,
5046 };
5047 
5048 static int show_traces_open(struct inode *inode, struct file *file)
5049 {
5050 	struct trace_array *tr = inode->i_private;
5051 	struct seq_file *m;
5052 	int ret;
5053 
5054 	ret = tracing_check_open_get_tr(tr);
5055 	if (ret)
5056 		return ret;
5057 
5058 	ret = seq_open(file, &show_traces_seq_ops);
5059 	if (ret) {
5060 		trace_array_put(tr);
5061 		return ret;
5062 	}
5063 
5064 	m = file->private_data;
5065 	m->private = tr;
5066 
5067 	return 0;
5068 }
5069 
5070 static int tracing_seq_release(struct inode *inode, struct file *file)
5071 {
5072 	struct trace_array *tr = inode->i_private;
5073 
5074 	trace_array_put(tr);
5075 	return seq_release(inode, file);
5076 }
5077 
5078 static ssize_t
5079 tracing_write_stub(struct file *filp, const char __user *ubuf,
5080 		   size_t count, loff_t *ppos)
5081 {
5082 	return count;
5083 }
5084 
5085 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5086 {
5087 	int ret;
5088 
5089 	if (file->f_mode & FMODE_READ)
5090 		ret = seq_lseek(file, offset, whence);
5091 	else
5092 		file->f_pos = ret = 0;
5093 
5094 	return ret;
5095 }
5096 
5097 static const struct file_operations tracing_fops = {
5098 	.open		= tracing_open,
5099 	.read		= seq_read,
5100 	.read_iter	= seq_read_iter,
5101 	.splice_read	= copy_splice_read,
5102 	.write		= tracing_write_stub,
5103 	.llseek		= tracing_lseek,
5104 	.release	= tracing_release,
5105 };
5106 
5107 static const struct file_operations show_traces_fops = {
5108 	.open		= show_traces_open,
5109 	.read		= seq_read,
5110 	.llseek		= seq_lseek,
5111 	.release	= tracing_seq_release,
5112 };
5113 
5114 static ssize_t
5115 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5116 		     size_t count, loff_t *ppos)
5117 {
5118 	struct trace_array *tr = file_inode(filp)->i_private;
5119 	char *mask_str;
5120 	int len;
5121 
5122 	len = snprintf(NULL, 0, "%*pb\n",
5123 		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
5124 	mask_str = kmalloc(len, GFP_KERNEL);
5125 	if (!mask_str)
5126 		return -ENOMEM;
5127 
5128 	len = snprintf(mask_str, len, "%*pb\n",
5129 		       cpumask_pr_args(tr->tracing_cpumask));
5130 	if (len >= count) {
5131 		count = -EINVAL;
5132 		goto out_err;
5133 	}
5134 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5135 
5136 out_err:
5137 	kfree(mask_str);
5138 
5139 	return count;
5140 }
5141 
5142 int tracing_set_cpumask(struct trace_array *tr,
5143 			cpumask_var_t tracing_cpumask_new)
5144 {
5145 	int cpu;
5146 
5147 	if (!tr)
5148 		return -EINVAL;
5149 
5150 	local_irq_disable();
5151 	arch_spin_lock(&tr->max_lock);
5152 	for_each_tracing_cpu(cpu) {
5153 		/*
5154 		 * Increase/decrease the disabled counter if we are
5155 		 * about to flip a bit in the cpumask:
5156 		 */
5157 		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5158 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5159 			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5160 			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5161 #ifdef CONFIG_TRACER_MAX_TRACE
5162 			ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5163 #endif
5164 		}
5165 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5166 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5167 			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5168 			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5169 #ifdef CONFIG_TRACER_MAX_TRACE
5170 			ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5171 #endif
5172 		}
5173 	}
5174 	arch_spin_unlock(&tr->max_lock);
5175 	local_irq_enable();
5176 
5177 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5178 
5179 	return 0;
5180 }
5181 
5182 static ssize_t
5183 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5184 		      size_t count, loff_t *ppos)
5185 {
5186 	struct trace_array *tr = file_inode(filp)->i_private;
5187 	cpumask_var_t tracing_cpumask_new;
5188 	int err;
5189 
5190 	if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5191 		return -ENOMEM;
5192 
5193 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5194 	if (err)
5195 		goto err_free;
5196 
5197 	err = tracing_set_cpumask(tr, tracing_cpumask_new);
5198 	if (err)
5199 		goto err_free;
5200 
5201 	free_cpumask_var(tracing_cpumask_new);
5202 
5203 	return count;
5204 
5205 err_free:
5206 	free_cpumask_var(tracing_cpumask_new);
5207 
5208 	return err;
5209 }
5210 
5211 static const struct file_operations tracing_cpumask_fops = {
5212 	.open		= tracing_open_generic_tr,
5213 	.read		= tracing_cpumask_read,
5214 	.write		= tracing_cpumask_write,
5215 	.release	= tracing_release_generic_tr,
5216 	.llseek		= generic_file_llseek,
5217 };
5218 
5219 static int tracing_trace_options_show(struct seq_file *m, void *v)
5220 {
5221 	struct tracer_opt *trace_opts;
5222 	struct trace_array *tr = m->private;
5223 	u32 tracer_flags;
5224 	int i;
5225 
5226 	mutex_lock(&trace_types_lock);
5227 	tracer_flags = tr->current_trace->flags->val;
5228 	trace_opts = tr->current_trace->flags->opts;
5229 
5230 	for (i = 0; trace_options[i]; i++) {
5231 		if (tr->trace_flags & (1 << i))
5232 			seq_printf(m, "%s\n", trace_options[i]);
5233 		else
5234 			seq_printf(m, "no%s\n", trace_options[i]);
5235 	}
5236 
5237 	for (i = 0; trace_opts[i].name; i++) {
5238 		if (tracer_flags & trace_opts[i].bit)
5239 			seq_printf(m, "%s\n", trace_opts[i].name);
5240 		else
5241 			seq_printf(m, "no%s\n", trace_opts[i].name);
5242 	}
5243 	mutex_unlock(&trace_types_lock);
5244 
5245 	return 0;
5246 }
5247 
5248 static int __set_tracer_option(struct trace_array *tr,
5249 			       struct tracer_flags *tracer_flags,
5250 			       struct tracer_opt *opts, int neg)
5251 {
5252 	struct tracer *trace = tracer_flags->trace;
5253 	int ret;
5254 
5255 	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5256 	if (ret)
5257 		return ret;
5258 
5259 	if (neg)
5260 		tracer_flags->val &= ~opts->bit;
5261 	else
5262 		tracer_flags->val |= opts->bit;
5263 	return 0;
5264 }
5265 
5266 /* Try to assign a tracer specific option */
5267 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5268 {
5269 	struct tracer *trace = tr->current_trace;
5270 	struct tracer_flags *tracer_flags = trace->flags;
5271 	struct tracer_opt *opts = NULL;
5272 	int i;
5273 
5274 	for (i = 0; tracer_flags->opts[i].name; i++) {
5275 		opts = &tracer_flags->opts[i];
5276 
5277 		if (strcmp(cmp, opts->name) == 0)
5278 			return __set_tracer_option(tr, trace->flags, opts, neg);
5279 	}
5280 
5281 	return -EINVAL;
5282 }
5283 
5284 /* Some tracers require overwrite to stay enabled */
5285 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5286 {
5287 	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5288 		return -1;
5289 
5290 	return 0;
5291 }
5292 
5293 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5294 {
5295 	if ((mask == TRACE_ITER_RECORD_TGID) ||
5296 	    (mask == TRACE_ITER_RECORD_CMD) ||
5297 	    (mask == TRACE_ITER_TRACE_PRINTK))
5298 		lockdep_assert_held(&event_mutex);
5299 
5300 	/* do nothing if flag is already set */
5301 	if (!!(tr->trace_flags & mask) == !!enabled)
5302 		return 0;
5303 
5304 	/* Give the tracer a chance to approve the change */
5305 	if (tr->current_trace->flag_changed)
5306 		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5307 			return -EINVAL;
5308 
5309 	if (mask == TRACE_ITER_TRACE_PRINTK) {
5310 		if (enabled) {
5311 			update_printk_trace(tr);
5312 		} else {
5313 			/*
5314 			 * The global_trace cannot clear this.
5315 			 * It's flag only gets cleared if another instance sets it.
5316 			 */
5317 			if (printk_trace == &global_trace)
5318 				return -EINVAL;
5319 			/*
5320 			 * An instance must always have it set.
5321 			 * by default, that's the global_trace instane.
5322 			 */
5323 			if (printk_trace == tr)
5324 				update_printk_trace(&global_trace);
5325 		}
5326 	}
5327 
5328 	if (enabled)
5329 		tr->trace_flags |= mask;
5330 	else
5331 		tr->trace_flags &= ~mask;
5332 
5333 	if (mask == TRACE_ITER_RECORD_CMD)
5334 		trace_event_enable_cmd_record(enabled);
5335 
5336 	if (mask == TRACE_ITER_RECORD_TGID) {
5337 
5338 		if (trace_alloc_tgid_map() < 0) {
5339 			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5340 			return -ENOMEM;
5341 		}
5342 
5343 		trace_event_enable_tgid_record(enabled);
5344 	}
5345 
5346 	if (mask == TRACE_ITER_EVENT_FORK)
5347 		trace_event_follow_fork(tr, enabled);
5348 
5349 	if (mask == TRACE_ITER_FUNC_FORK)
5350 		ftrace_pid_follow_fork(tr, enabled);
5351 
5352 	if (mask == TRACE_ITER_OVERWRITE) {
5353 		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5354 #ifdef CONFIG_TRACER_MAX_TRACE
5355 		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5356 #endif
5357 	}
5358 
5359 	if (mask == TRACE_ITER_PRINTK) {
5360 		trace_printk_start_stop_comm(enabled);
5361 		trace_printk_control(enabled);
5362 	}
5363 
5364 	return 0;
5365 }
5366 
5367 int trace_set_options(struct trace_array *tr, char *option)
5368 {
5369 	char *cmp;
5370 	int neg = 0;
5371 	int ret;
5372 	size_t orig_len = strlen(option);
5373 	int len;
5374 
5375 	cmp = strstrip(option);
5376 
5377 	len = str_has_prefix(cmp, "no");
5378 	if (len)
5379 		neg = 1;
5380 
5381 	cmp += len;
5382 
5383 	mutex_lock(&event_mutex);
5384 	mutex_lock(&trace_types_lock);
5385 
5386 	ret = match_string(trace_options, -1, cmp);
5387 	/* If no option could be set, test the specific tracer options */
5388 	if (ret < 0)
5389 		ret = set_tracer_option(tr, cmp, neg);
5390 	else
5391 		ret = set_tracer_flag(tr, 1 << ret, !neg);
5392 
5393 	mutex_unlock(&trace_types_lock);
5394 	mutex_unlock(&event_mutex);
5395 
5396 	/*
5397 	 * If the first trailing whitespace is replaced with '\0' by strstrip,
5398 	 * turn it back into a space.
5399 	 */
5400 	if (orig_len > strlen(option))
5401 		option[strlen(option)] = ' ';
5402 
5403 	return ret;
5404 }
5405 
5406 static void __init apply_trace_boot_options(void)
5407 {
5408 	char *buf = trace_boot_options_buf;
5409 	char *option;
5410 
5411 	while (true) {
5412 		option = strsep(&buf, ",");
5413 
5414 		if (!option)
5415 			break;
5416 
5417 		if (*option)
5418 			trace_set_options(&global_trace, option);
5419 
5420 		/* Put back the comma to allow this to be called again */
5421 		if (buf)
5422 			*(buf - 1) = ',';
5423 	}
5424 }
5425 
5426 static ssize_t
5427 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5428 			size_t cnt, loff_t *ppos)
5429 {
5430 	struct seq_file *m = filp->private_data;
5431 	struct trace_array *tr = m->private;
5432 	char buf[64];
5433 	int ret;
5434 
5435 	if (cnt >= sizeof(buf))
5436 		return -EINVAL;
5437 
5438 	if (copy_from_user(buf, ubuf, cnt))
5439 		return -EFAULT;
5440 
5441 	buf[cnt] = 0;
5442 
5443 	ret = trace_set_options(tr, buf);
5444 	if (ret < 0)
5445 		return ret;
5446 
5447 	*ppos += cnt;
5448 
5449 	return cnt;
5450 }
5451 
5452 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5453 {
5454 	struct trace_array *tr = inode->i_private;
5455 	int ret;
5456 
5457 	ret = tracing_check_open_get_tr(tr);
5458 	if (ret)
5459 		return ret;
5460 
5461 	ret = single_open(file, tracing_trace_options_show, inode->i_private);
5462 	if (ret < 0)
5463 		trace_array_put(tr);
5464 
5465 	return ret;
5466 }
5467 
5468 static const struct file_operations tracing_iter_fops = {
5469 	.open		= tracing_trace_options_open,
5470 	.read		= seq_read,
5471 	.llseek		= seq_lseek,
5472 	.release	= tracing_single_release_tr,
5473 	.write		= tracing_trace_options_write,
5474 };
5475 
5476 static const char readme_msg[] =
5477 	"tracing mini-HOWTO:\n\n"
5478 	"# echo 0 > tracing_on : quick way to disable tracing\n"
5479 	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5480 	" Important files:\n"
5481 	"  trace\t\t\t- The static contents of the buffer\n"
5482 	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
5483 	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5484 	"  current_tracer\t- function and latency tracers\n"
5485 	"  available_tracers\t- list of configured tracers for current_tracer\n"
5486 	"  error_log\t- error log for failed commands (that support it)\n"
5487 	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
5488 	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
5489 	"  trace_clock\t\t- change the clock used to order events\n"
5490 	"       local:   Per cpu clock but may not be synced across CPUs\n"
5491 	"      global:   Synced across CPUs but slows tracing down.\n"
5492 	"     counter:   Not a clock, but just an increment\n"
5493 	"      uptime:   Jiffy counter from time of boot\n"
5494 	"        perf:   Same clock that perf events use\n"
5495 #ifdef CONFIG_X86_64
5496 	"     x86-tsc:   TSC cycle counter\n"
5497 #endif
5498 	"\n  timestamp_mode\t- view the mode used to timestamp events\n"
5499 	"       delta:   Delta difference against a buffer-wide timestamp\n"
5500 	"    absolute:   Absolute (standalone) timestamp\n"
5501 	"\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5502 	"\n  trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5503 	"  tracing_cpumask\t- Limit which CPUs to trace\n"
5504 	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5505 	"\t\t\t  Remove sub-buffer with rmdir\n"
5506 	"  trace_options\t\t- Set format or modify how tracing happens\n"
5507 	"\t\t\t  Disable an option by prefixing 'no' to the\n"
5508 	"\t\t\t  option name\n"
5509 	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5510 #ifdef CONFIG_DYNAMIC_FTRACE
5511 	"\n  available_filter_functions - list of functions that can be filtered on\n"
5512 	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
5513 	"\t\t\t  functions\n"
5514 	"\t     accepts: func_full_name or glob-matching-pattern\n"
5515 	"\t     modules: Can select a group via module\n"
5516 	"\t      Format: :mod:<module-name>\n"
5517 	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
5518 	"\t    triggers: a command to perform when function is hit\n"
5519 	"\t      Format: <function>:<trigger>[:count]\n"
5520 	"\t     trigger: traceon, traceoff\n"
5521 	"\t\t      enable_event:<system>:<event>\n"
5522 	"\t\t      disable_event:<system>:<event>\n"
5523 #ifdef CONFIG_STACKTRACE
5524 	"\t\t      stacktrace\n"
5525 #endif
5526 #ifdef CONFIG_TRACER_SNAPSHOT
5527 	"\t\t      snapshot\n"
5528 #endif
5529 	"\t\t      dump\n"
5530 	"\t\t      cpudump\n"
5531 	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
5532 	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
5533 	"\t     The first one will disable tracing every time do_fault is hit\n"
5534 	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
5535 	"\t       The first time do trap is hit and it disables tracing, the\n"
5536 	"\t       counter will decrement to 2. If tracing is already disabled,\n"
5537 	"\t       the counter will not decrement. It only decrements when the\n"
5538 	"\t       trigger did work\n"
5539 	"\t     To remove trigger without count:\n"
5540 	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
5541 	"\t     To remove trigger with a count:\n"
5542 	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5543 	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
5544 	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5545 	"\t    modules: Can select a group via module command :mod:\n"
5546 	"\t    Does not accept triggers\n"
5547 #endif /* CONFIG_DYNAMIC_FTRACE */
5548 #ifdef CONFIG_FUNCTION_TRACER
5549 	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5550 	"\t\t    (function)\n"
5551 	"  set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5552 	"\t\t    (function)\n"
5553 #endif
5554 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5555 	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5556 	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5557 	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5558 #endif
5559 #ifdef CONFIG_TRACER_SNAPSHOT
5560 	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
5561 	"\t\t\t  snapshot buffer. Read the contents for more\n"
5562 	"\t\t\t  information\n"
5563 #endif
5564 #ifdef CONFIG_STACK_TRACER
5565 	"  stack_trace\t\t- Shows the max stack trace when active\n"
5566 	"  stack_max_size\t- Shows current max stack size that was traced\n"
5567 	"\t\t\t  Write into this file to reset the max size (trigger a\n"
5568 	"\t\t\t  new trace)\n"
5569 #ifdef CONFIG_DYNAMIC_FTRACE
5570 	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5571 	"\t\t\t  traces\n"
5572 #endif
5573 #endif /* CONFIG_STACK_TRACER */
5574 #ifdef CONFIG_DYNAMIC_EVENTS
5575 	"  dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5576 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5577 #endif
5578 #ifdef CONFIG_KPROBE_EVENTS
5579 	"  kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5580 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5581 #endif
5582 #ifdef CONFIG_UPROBE_EVENTS
5583 	"  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5584 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5585 #endif
5586 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
5587     defined(CONFIG_FPROBE_EVENTS)
5588 	"\t  accepts: event-definitions (one definition per line)\n"
5589 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5590 	"\t   Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5591 	"\t           r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5592 #endif
5593 #ifdef CONFIG_FPROBE_EVENTS
5594 	"\t           f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5595 	"\t           t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
5596 #endif
5597 #ifdef CONFIG_HIST_TRIGGERS
5598 	"\t           s:[synthetic/]<event> <field> [<field>]\n"
5599 #endif
5600 	"\t           e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5601 	"\t           -:[<group>/][<event>]\n"
5602 #ifdef CONFIG_KPROBE_EVENTS
5603 	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5604   "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5605 #endif
5606 #ifdef CONFIG_UPROBE_EVENTS
5607   "   place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5608 #endif
5609 	"\t     args: <name>=fetcharg[:type]\n"
5610 	"\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5611 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5612 	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5613 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
5614 	"\t           <argname>[->field[->field|.field...]],\n"
5615 #endif
5616 #else
5617 	"\t           $stack<index>, $stack, $retval, $comm,\n"
5618 #endif
5619 	"\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5620 	"\t     kernel return probes support: $retval, $arg<N>, $comm\n"
5621 	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5622 	"\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5623 	"\t           symstr, %pd/%pD, <type>\\[<array-size>\\]\n"
5624 #ifdef CONFIG_HIST_TRIGGERS
5625 	"\t    field: <stype> <name>;\n"
5626 	"\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5627 	"\t           [unsigned] char/int/long\n"
5628 #endif
5629 	"\t    efield: For event probes ('e' types), the field is on of the fields\n"
5630 	"\t            of the <attached-group>/<attached-event>.\n"
5631 #endif
5632 	"  events/\t\t- Directory containing all trace event subsystems:\n"
5633 	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5634 	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
5635 	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5636 	"\t\t\t  events\n"
5637 	"      filter\t\t- If set, only events passing filter are traced\n"
5638 	"  events/<system>/<event>/\t- Directory containing control files for\n"
5639 	"\t\t\t  <event>:\n"
5640 	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5641 	"      filter\t\t- If set, only events passing filter are traced\n"
5642 	"      trigger\t\t- If set, a command to perform when event is hit\n"
5643 	"\t    Format: <trigger>[:count][if <filter>]\n"
5644 	"\t   trigger: traceon, traceoff\n"
5645 	"\t            enable_event:<system>:<event>\n"
5646 	"\t            disable_event:<system>:<event>\n"
5647 #ifdef CONFIG_HIST_TRIGGERS
5648 	"\t            enable_hist:<system>:<event>\n"
5649 	"\t            disable_hist:<system>:<event>\n"
5650 #endif
5651 #ifdef CONFIG_STACKTRACE
5652 	"\t\t    stacktrace\n"
5653 #endif
5654 #ifdef CONFIG_TRACER_SNAPSHOT
5655 	"\t\t    snapshot\n"
5656 #endif
5657 #ifdef CONFIG_HIST_TRIGGERS
5658 	"\t\t    hist (see below)\n"
5659 #endif
5660 	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
5661 	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
5662 	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5663 	"\t                  events/block/block_unplug/trigger\n"
5664 	"\t   The first disables tracing every time block_unplug is hit.\n"
5665 	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
5666 	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
5667 	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5668 	"\t   Like function triggers, the counter is only decremented if it\n"
5669 	"\t    enabled or disabled tracing.\n"
5670 	"\t   To remove a trigger without a count:\n"
5671 	"\t     echo '!<trigger> > <system>/<event>/trigger\n"
5672 	"\t   To remove a trigger with a count:\n"
5673 	"\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
5674 	"\t   Filters can be ignored when removing a trigger.\n"
5675 #ifdef CONFIG_HIST_TRIGGERS
5676 	"      hist trigger\t- If set, event hits are aggregated into a hash table\n"
5677 	"\t    Format: hist:keys=<field1[,field2,...]>\n"
5678 	"\t            [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5679 	"\t            [:values=<field1[,field2,...]>]\n"
5680 	"\t            [:sort=<field1[,field2,...]>]\n"
5681 	"\t            [:size=#entries]\n"
5682 	"\t            [:pause][:continue][:clear]\n"
5683 	"\t            [:name=histname1]\n"
5684 	"\t            [:nohitcount]\n"
5685 	"\t            [:<handler>.<action>]\n"
5686 	"\t            [if <filter>]\n\n"
5687 	"\t    Note, special fields can be used as well:\n"
5688 	"\t            common_timestamp - to record current timestamp\n"
5689 	"\t            common_cpu - to record the CPU the event happened on\n"
5690 	"\n"
5691 	"\t    A hist trigger variable can be:\n"
5692 	"\t        - a reference to a field e.g. x=current_timestamp,\n"
5693 	"\t        - a reference to another variable e.g. y=$x,\n"
5694 	"\t        - a numeric literal: e.g. ms_per_sec=1000,\n"
5695 	"\t        - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5696 	"\n"
5697 	"\t    hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5698 	"\t    multiplication(*) and division(/) operators. An operand can be either a\n"
5699 	"\t    variable reference, field or numeric literal.\n"
5700 	"\n"
5701 	"\t    When a matching event is hit, an entry is added to a hash\n"
5702 	"\t    table using the key(s) and value(s) named, and the value of a\n"
5703 	"\t    sum called 'hitcount' is incremented.  Keys and values\n"
5704 	"\t    correspond to fields in the event's format description.  Keys\n"
5705 	"\t    can be any field, or the special string 'common_stacktrace'.\n"
5706 	"\t    Compound keys consisting of up to two fields can be specified\n"
5707 	"\t    by the 'keys' keyword.  Values must correspond to numeric\n"
5708 	"\t    fields.  Sort keys consisting of up to two fields can be\n"
5709 	"\t    specified using the 'sort' keyword.  The sort direction can\n"
5710 	"\t    be modified by appending '.descending' or '.ascending' to a\n"
5711 	"\t    sort field.  The 'size' parameter can be used to specify more\n"
5712 	"\t    or fewer than the default 2048 entries for the hashtable size.\n"
5713 	"\t    If a hist trigger is given a name using the 'name' parameter,\n"
5714 	"\t    its histogram data will be shared with other triggers of the\n"
5715 	"\t    same name, and trigger hits will update this common data.\n\n"
5716 	"\t    Reading the 'hist' file for the event will dump the hash\n"
5717 	"\t    table in its entirety to stdout.  If there are multiple hist\n"
5718 	"\t    triggers attached to an event, there will be a table for each\n"
5719 	"\t    trigger in the output.  The table displayed for a named\n"
5720 	"\t    trigger will be the same as any other instance having the\n"
5721 	"\t    same name.  The default format used to display a given field\n"
5722 	"\t    can be modified by appending any of the following modifiers\n"
5723 	"\t    to the field name, as applicable:\n\n"
5724 	"\t            .hex        display a number as a hex value\n"
5725 	"\t            .sym        display an address as a symbol\n"
5726 	"\t            .sym-offset display an address as a symbol and offset\n"
5727 	"\t            .execname   display a common_pid as a program name\n"
5728 	"\t            .syscall    display a syscall id as a syscall name\n"
5729 	"\t            .log2       display log2 value rather than raw number\n"
5730 	"\t            .buckets=size  display values in groups of size rather than raw number\n"
5731 	"\t            .usecs      display a common_timestamp in microseconds\n"
5732 	"\t            .percent    display a number of percentage value\n"
5733 	"\t            .graph      display a bar-graph of a value\n\n"
5734 	"\t    The 'pause' parameter can be used to pause an existing hist\n"
5735 	"\t    trigger or to start a hist trigger but not log any events\n"
5736 	"\t    until told to do so.  'continue' can be used to start or\n"
5737 	"\t    restart a paused hist trigger.\n\n"
5738 	"\t    The 'clear' parameter will clear the contents of a running\n"
5739 	"\t    hist trigger and leave its current paused/active state\n"
5740 	"\t    unchanged.\n\n"
5741 	"\t    The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5742 	"\t    raw hitcount in the histogram.\n\n"
5743 	"\t    The enable_hist and disable_hist triggers can be used to\n"
5744 	"\t    have one event conditionally start and stop another event's\n"
5745 	"\t    already-attached hist trigger.  The syntax is analogous to\n"
5746 	"\t    the enable_event and disable_event triggers.\n\n"
5747 	"\t    Hist trigger handlers and actions are executed whenever a\n"
5748 	"\t    a histogram entry is added or updated.  They take the form:\n\n"
5749 	"\t        <handler>.<action>\n\n"
5750 	"\t    The available handlers are:\n\n"
5751 	"\t        onmatch(matching.event)  - invoke on addition or update\n"
5752 	"\t        onmax(var)               - invoke if var exceeds current max\n"
5753 	"\t        onchange(var)            - invoke action if var changes\n\n"
5754 	"\t    The available actions are:\n\n"
5755 	"\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"
5756 	"\t        save(field,...)                      - save current event fields\n"
5757 #ifdef CONFIG_TRACER_SNAPSHOT
5758 	"\t        snapshot()                           - snapshot the trace buffer\n\n"
5759 #endif
5760 #ifdef CONFIG_SYNTH_EVENTS
5761 	"  events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5762 	"\t  Write into this file to define/undefine new synthetic events.\n"
5763 	"\t     example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5764 #endif
5765 #endif
5766 ;
5767 
5768 static ssize_t
5769 tracing_readme_read(struct file *filp, char __user *ubuf,
5770 		       size_t cnt, loff_t *ppos)
5771 {
5772 	return simple_read_from_buffer(ubuf, cnt, ppos,
5773 					readme_msg, strlen(readme_msg));
5774 }
5775 
5776 static const struct file_operations tracing_readme_fops = {
5777 	.open		= tracing_open_generic,
5778 	.read		= tracing_readme_read,
5779 	.llseek		= generic_file_llseek,
5780 };
5781 
5782 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5783 static union trace_eval_map_item *
5784 update_eval_map(union trace_eval_map_item *ptr)
5785 {
5786 	if (!ptr->map.eval_string) {
5787 		if (ptr->tail.next) {
5788 			ptr = ptr->tail.next;
5789 			/* Set ptr to the next real item (skip head) */
5790 			ptr++;
5791 		} else
5792 			return NULL;
5793 	}
5794 	return ptr;
5795 }
5796 
5797 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5798 {
5799 	union trace_eval_map_item *ptr = v;
5800 
5801 	/*
5802 	 * Paranoid! If ptr points to end, we don't want to increment past it.
5803 	 * This really should never happen.
5804 	 */
5805 	(*pos)++;
5806 	ptr = update_eval_map(ptr);
5807 	if (WARN_ON_ONCE(!ptr))
5808 		return NULL;
5809 
5810 	ptr++;
5811 	ptr = update_eval_map(ptr);
5812 
5813 	return ptr;
5814 }
5815 
5816 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5817 {
5818 	union trace_eval_map_item *v;
5819 	loff_t l = 0;
5820 
5821 	mutex_lock(&trace_eval_mutex);
5822 
5823 	v = trace_eval_maps;
5824 	if (v)
5825 		v++;
5826 
5827 	while (v && l < *pos) {
5828 		v = eval_map_next(m, v, &l);
5829 	}
5830 
5831 	return v;
5832 }
5833 
5834 static void eval_map_stop(struct seq_file *m, void *v)
5835 {
5836 	mutex_unlock(&trace_eval_mutex);
5837 }
5838 
5839 static int eval_map_show(struct seq_file *m, void *v)
5840 {
5841 	union trace_eval_map_item *ptr = v;
5842 
5843 	seq_printf(m, "%s %ld (%s)\n",
5844 		   ptr->map.eval_string, ptr->map.eval_value,
5845 		   ptr->map.system);
5846 
5847 	return 0;
5848 }
5849 
5850 static const struct seq_operations tracing_eval_map_seq_ops = {
5851 	.start		= eval_map_start,
5852 	.next		= eval_map_next,
5853 	.stop		= eval_map_stop,
5854 	.show		= eval_map_show,
5855 };
5856 
5857 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5858 {
5859 	int ret;
5860 
5861 	ret = tracing_check_open_get_tr(NULL);
5862 	if (ret)
5863 		return ret;
5864 
5865 	return seq_open(filp, &tracing_eval_map_seq_ops);
5866 }
5867 
5868 static const struct file_operations tracing_eval_map_fops = {
5869 	.open		= tracing_eval_map_open,
5870 	.read		= seq_read,
5871 	.llseek		= seq_lseek,
5872 	.release	= seq_release,
5873 };
5874 
5875 static inline union trace_eval_map_item *
5876 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5877 {
5878 	/* Return tail of array given the head */
5879 	return ptr + ptr->head.length + 1;
5880 }
5881 
5882 static void
5883 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5884 			   int len)
5885 {
5886 	struct trace_eval_map **stop;
5887 	struct trace_eval_map **map;
5888 	union trace_eval_map_item *map_array;
5889 	union trace_eval_map_item *ptr;
5890 
5891 	stop = start + len;
5892 
5893 	/*
5894 	 * The trace_eval_maps contains the map plus a head and tail item,
5895 	 * where the head holds the module and length of array, and the
5896 	 * tail holds a pointer to the next list.
5897 	 */
5898 	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5899 	if (!map_array) {
5900 		pr_warn("Unable to allocate trace eval mapping\n");
5901 		return;
5902 	}
5903 
5904 	mutex_lock(&trace_eval_mutex);
5905 
5906 	if (!trace_eval_maps)
5907 		trace_eval_maps = map_array;
5908 	else {
5909 		ptr = trace_eval_maps;
5910 		for (;;) {
5911 			ptr = trace_eval_jmp_to_tail(ptr);
5912 			if (!ptr->tail.next)
5913 				break;
5914 			ptr = ptr->tail.next;
5915 
5916 		}
5917 		ptr->tail.next = map_array;
5918 	}
5919 	map_array->head.mod = mod;
5920 	map_array->head.length = len;
5921 	map_array++;
5922 
5923 	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5924 		map_array->map = **map;
5925 		map_array++;
5926 	}
5927 	memset(map_array, 0, sizeof(*map_array));
5928 
5929 	mutex_unlock(&trace_eval_mutex);
5930 }
5931 
5932 static void trace_create_eval_file(struct dentry *d_tracer)
5933 {
5934 	trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
5935 			  NULL, &tracing_eval_map_fops);
5936 }
5937 
5938 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5939 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5940 static inline void trace_insert_eval_map_file(struct module *mod,
5941 			      struct trace_eval_map **start, int len) { }
5942 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5943 
5944 static void trace_insert_eval_map(struct module *mod,
5945 				  struct trace_eval_map **start, int len)
5946 {
5947 	struct trace_eval_map **map;
5948 
5949 	if (len <= 0)
5950 		return;
5951 
5952 	map = start;
5953 
5954 	trace_event_eval_update(map, len);
5955 
5956 	trace_insert_eval_map_file(mod, start, len);
5957 }
5958 
5959 static ssize_t
5960 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5961 		       size_t cnt, loff_t *ppos)
5962 {
5963 	struct trace_array *tr = filp->private_data;
5964 	char buf[MAX_TRACER_SIZE+2];
5965 	int r;
5966 
5967 	mutex_lock(&trace_types_lock);
5968 	r = sprintf(buf, "%s\n", tr->current_trace->name);
5969 	mutex_unlock(&trace_types_lock);
5970 
5971 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5972 }
5973 
5974 int tracer_init(struct tracer *t, struct trace_array *tr)
5975 {
5976 	tracing_reset_online_cpus(&tr->array_buffer);
5977 	return t->init(tr);
5978 }
5979 
5980 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
5981 {
5982 	int cpu;
5983 
5984 	for_each_tracing_cpu(cpu)
5985 		per_cpu_ptr(buf->data, cpu)->entries = val;
5986 }
5987 
5988 static void update_buffer_entries(struct array_buffer *buf, int cpu)
5989 {
5990 	if (cpu == RING_BUFFER_ALL_CPUS) {
5991 		set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
5992 	} else {
5993 		per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
5994 	}
5995 }
5996 
5997 #ifdef CONFIG_TRACER_MAX_TRACE
5998 /* resize @tr's buffer to the size of @size_tr's entries */
5999 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6000 					struct array_buffer *size_buf, int cpu_id)
6001 {
6002 	int cpu, ret = 0;
6003 
6004 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
6005 		for_each_tracing_cpu(cpu) {
6006 			ret = ring_buffer_resize(trace_buf->buffer,
6007 				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6008 			if (ret < 0)
6009 				break;
6010 			per_cpu_ptr(trace_buf->data, cpu)->entries =
6011 				per_cpu_ptr(size_buf->data, cpu)->entries;
6012 		}
6013 	} else {
6014 		ret = ring_buffer_resize(trace_buf->buffer,
6015 				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6016 		if (ret == 0)
6017 			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6018 				per_cpu_ptr(size_buf->data, cpu_id)->entries;
6019 	}
6020 
6021 	return ret;
6022 }
6023 #endif /* CONFIG_TRACER_MAX_TRACE */
6024 
6025 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6026 					unsigned long size, int cpu)
6027 {
6028 	int ret;
6029 
6030 	/*
6031 	 * If kernel or user changes the size of the ring buffer
6032 	 * we use the size that was given, and we can forget about
6033 	 * expanding it later.
6034 	 */
6035 	trace_set_ring_buffer_expanded(tr);
6036 
6037 	/* May be called before buffers are initialized */
6038 	if (!tr->array_buffer.buffer)
6039 		return 0;
6040 
6041 	/* Do not allow tracing while resizing ring buffer */
6042 	tracing_stop_tr(tr);
6043 
6044 	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6045 	if (ret < 0)
6046 		goto out_start;
6047 
6048 #ifdef CONFIG_TRACER_MAX_TRACE
6049 	if (!tr->allocated_snapshot)
6050 		goto out;
6051 
6052 	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6053 	if (ret < 0) {
6054 		int r = resize_buffer_duplicate_size(&tr->array_buffer,
6055 						     &tr->array_buffer, cpu);
6056 		if (r < 0) {
6057 			/*
6058 			 * AARGH! We are left with different
6059 			 * size max buffer!!!!
6060 			 * The max buffer is our "snapshot" buffer.
6061 			 * When a tracer needs a snapshot (one of the
6062 			 * latency tracers), it swaps the max buffer
6063 			 * with the saved snap shot. We succeeded to
6064 			 * update the size of the main buffer, but failed to
6065 			 * update the size of the max buffer. But when we tried
6066 			 * to reset the main buffer to the original size, we
6067 			 * failed there too. This is very unlikely to
6068 			 * happen, but if it does, warn and kill all
6069 			 * tracing.
6070 			 */
6071 			WARN_ON(1);
6072 			tracing_disabled = 1;
6073 		}
6074 		goto out_start;
6075 	}
6076 
6077 	update_buffer_entries(&tr->max_buffer, cpu);
6078 
6079  out:
6080 #endif /* CONFIG_TRACER_MAX_TRACE */
6081 
6082 	update_buffer_entries(&tr->array_buffer, cpu);
6083  out_start:
6084 	tracing_start_tr(tr);
6085 	return ret;
6086 }
6087 
6088 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6089 				  unsigned long size, int cpu_id)
6090 {
6091 	int ret;
6092 
6093 	mutex_lock(&trace_types_lock);
6094 
6095 	if (cpu_id != RING_BUFFER_ALL_CPUS) {
6096 		/* make sure, this cpu is enabled in the mask */
6097 		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6098 			ret = -EINVAL;
6099 			goto out;
6100 		}
6101 	}
6102 
6103 	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6104 	if (ret < 0)
6105 		ret = -ENOMEM;
6106 
6107 out:
6108 	mutex_unlock(&trace_types_lock);
6109 
6110 	return ret;
6111 }
6112 
6113 static void update_last_data(struct trace_array *tr)
6114 {
6115 	if (!tr->text_delta && !tr->data_delta)
6116 		return;
6117 
6118 	/* Clear old data */
6119 	tracing_reset_online_cpus(&tr->array_buffer);
6120 
6121 	/* Using current data now */
6122 	tr->text_delta = 0;
6123 	tr->data_delta = 0;
6124 }
6125 
6126 /**
6127  * tracing_update_buffers - used by tracing facility to expand ring buffers
6128  * @tr: The tracing instance
6129  *
6130  * To save on memory when the tracing is never used on a system with it
6131  * configured in. The ring buffers are set to a minimum size. But once
6132  * a user starts to use the tracing facility, then they need to grow
6133  * to their default size.
6134  *
6135  * This function is to be called when a tracer is about to be used.
6136  */
6137 int tracing_update_buffers(struct trace_array *tr)
6138 {
6139 	int ret = 0;
6140 
6141 	mutex_lock(&trace_types_lock);
6142 
6143 	update_last_data(tr);
6144 
6145 	if (!tr->ring_buffer_expanded)
6146 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6147 						RING_BUFFER_ALL_CPUS);
6148 	mutex_unlock(&trace_types_lock);
6149 
6150 	return ret;
6151 }
6152 
6153 struct trace_option_dentry;
6154 
6155 static void
6156 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6157 
6158 /*
6159  * Used to clear out the tracer before deletion of an instance.
6160  * Must have trace_types_lock held.
6161  */
6162 static void tracing_set_nop(struct trace_array *tr)
6163 {
6164 	if (tr->current_trace == &nop_trace)
6165 		return;
6166 
6167 	tr->current_trace->enabled--;
6168 
6169 	if (tr->current_trace->reset)
6170 		tr->current_trace->reset(tr);
6171 
6172 	tr->current_trace = &nop_trace;
6173 }
6174 
6175 static bool tracer_options_updated;
6176 
6177 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6178 {
6179 	/* Only enable if the directory has been created already. */
6180 	if (!tr->dir)
6181 		return;
6182 
6183 	/* Only create trace option files after update_tracer_options finish */
6184 	if (!tracer_options_updated)
6185 		return;
6186 
6187 	create_trace_option_files(tr, t);
6188 }
6189 
6190 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6191 {
6192 	struct tracer *t;
6193 #ifdef CONFIG_TRACER_MAX_TRACE
6194 	bool had_max_tr;
6195 #endif
6196 	int ret = 0;
6197 
6198 	mutex_lock(&trace_types_lock);
6199 
6200 	update_last_data(tr);
6201 
6202 	if (!tr->ring_buffer_expanded) {
6203 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6204 						RING_BUFFER_ALL_CPUS);
6205 		if (ret < 0)
6206 			goto out;
6207 		ret = 0;
6208 	}
6209 
6210 	for (t = trace_types; t; t = t->next) {
6211 		if (strcmp(t->name, buf) == 0)
6212 			break;
6213 	}
6214 	if (!t) {
6215 		ret = -EINVAL;
6216 		goto out;
6217 	}
6218 	if (t == tr->current_trace)
6219 		goto out;
6220 
6221 #ifdef CONFIG_TRACER_SNAPSHOT
6222 	if (t->use_max_tr) {
6223 		local_irq_disable();
6224 		arch_spin_lock(&tr->max_lock);
6225 		if (tr->cond_snapshot)
6226 			ret = -EBUSY;
6227 		arch_spin_unlock(&tr->max_lock);
6228 		local_irq_enable();
6229 		if (ret)
6230 			goto out;
6231 	}
6232 #endif
6233 	/* Some tracers won't work on kernel command line */
6234 	if (system_state < SYSTEM_RUNNING && t->noboot) {
6235 		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6236 			t->name);
6237 		goto out;
6238 	}
6239 
6240 	/* Some tracers are only allowed for the top level buffer */
6241 	if (!trace_ok_for_array(t, tr)) {
6242 		ret = -EINVAL;
6243 		goto out;
6244 	}
6245 
6246 	/* If trace pipe files are being read, we can't change the tracer */
6247 	if (tr->trace_ref) {
6248 		ret = -EBUSY;
6249 		goto out;
6250 	}
6251 
6252 	trace_branch_disable();
6253 
6254 	tr->current_trace->enabled--;
6255 
6256 	if (tr->current_trace->reset)
6257 		tr->current_trace->reset(tr);
6258 
6259 #ifdef CONFIG_TRACER_MAX_TRACE
6260 	had_max_tr = tr->current_trace->use_max_tr;
6261 
6262 	/* Current trace needs to be nop_trace before synchronize_rcu */
6263 	tr->current_trace = &nop_trace;
6264 
6265 	if (had_max_tr && !t->use_max_tr) {
6266 		/*
6267 		 * We need to make sure that the update_max_tr sees that
6268 		 * current_trace changed to nop_trace to keep it from
6269 		 * swapping the buffers after we resize it.
6270 		 * The update_max_tr is called from interrupts disabled
6271 		 * so a synchronized_sched() is sufficient.
6272 		 */
6273 		synchronize_rcu();
6274 		free_snapshot(tr);
6275 		tracing_disarm_snapshot(tr);
6276 	}
6277 
6278 	if (!had_max_tr && t->use_max_tr) {
6279 		ret = tracing_arm_snapshot_locked(tr);
6280 		if (ret)
6281 			goto out;
6282 	}
6283 #else
6284 	tr->current_trace = &nop_trace;
6285 #endif
6286 
6287 	if (t->init) {
6288 		ret = tracer_init(t, tr);
6289 		if (ret) {
6290 #ifdef CONFIG_TRACER_MAX_TRACE
6291 			if (t->use_max_tr)
6292 				tracing_disarm_snapshot(tr);
6293 #endif
6294 			goto out;
6295 		}
6296 	}
6297 
6298 	tr->current_trace = t;
6299 	tr->current_trace->enabled++;
6300 	trace_branch_enable(tr);
6301  out:
6302 	mutex_unlock(&trace_types_lock);
6303 
6304 	return ret;
6305 }
6306 
6307 static ssize_t
6308 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6309 			size_t cnt, loff_t *ppos)
6310 {
6311 	struct trace_array *tr = filp->private_data;
6312 	char buf[MAX_TRACER_SIZE+1];
6313 	char *name;
6314 	size_t ret;
6315 	int err;
6316 
6317 	ret = cnt;
6318 
6319 	if (cnt > MAX_TRACER_SIZE)
6320 		cnt = MAX_TRACER_SIZE;
6321 
6322 	if (copy_from_user(buf, ubuf, cnt))
6323 		return -EFAULT;
6324 
6325 	buf[cnt] = 0;
6326 
6327 	name = strim(buf);
6328 
6329 	err = tracing_set_tracer(tr, name);
6330 	if (err)
6331 		return err;
6332 
6333 	*ppos += ret;
6334 
6335 	return ret;
6336 }
6337 
6338 static ssize_t
6339 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6340 		   size_t cnt, loff_t *ppos)
6341 {
6342 	char buf[64];
6343 	int r;
6344 
6345 	r = snprintf(buf, sizeof(buf), "%ld\n",
6346 		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6347 	if (r > sizeof(buf))
6348 		r = sizeof(buf);
6349 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6350 }
6351 
6352 static ssize_t
6353 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6354 		    size_t cnt, loff_t *ppos)
6355 {
6356 	unsigned long val;
6357 	int ret;
6358 
6359 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6360 	if (ret)
6361 		return ret;
6362 
6363 	*ptr = val * 1000;
6364 
6365 	return cnt;
6366 }
6367 
6368 static ssize_t
6369 tracing_thresh_read(struct file *filp, char __user *ubuf,
6370 		    size_t cnt, loff_t *ppos)
6371 {
6372 	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6373 }
6374 
6375 static ssize_t
6376 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6377 		     size_t cnt, loff_t *ppos)
6378 {
6379 	struct trace_array *tr = filp->private_data;
6380 	int ret;
6381 
6382 	mutex_lock(&trace_types_lock);
6383 	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6384 	if (ret < 0)
6385 		goto out;
6386 
6387 	if (tr->current_trace->update_thresh) {
6388 		ret = tr->current_trace->update_thresh(tr);
6389 		if (ret < 0)
6390 			goto out;
6391 	}
6392 
6393 	ret = cnt;
6394 out:
6395 	mutex_unlock(&trace_types_lock);
6396 
6397 	return ret;
6398 }
6399 
6400 #ifdef CONFIG_TRACER_MAX_TRACE
6401 
6402 static ssize_t
6403 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6404 		     size_t cnt, loff_t *ppos)
6405 {
6406 	struct trace_array *tr = filp->private_data;
6407 
6408 	return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6409 }
6410 
6411 static ssize_t
6412 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6413 		      size_t cnt, loff_t *ppos)
6414 {
6415 	struct trace_array *tr = filp->private_data;
6416 
6417 	return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6418 }
6419 
6420 #endif
6421 
6422 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6423 {
6424 	if (cpu == RING_BUFFER_ALL_CPUS) {
6425 		if (cpumask_empty(tr->pipe_cpumask)) {
6426 			cpumask_setall(tr->pipe_cpumask);
6427 			return 0;
6428 		}
6429 	} else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6430 		cpumask_set_cpu(cpu, tr->pipe_cpumask);
6431 		return 0;
6432 	}
6433 	return -EBUSY;
6434 }
6435 
6436 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6437 {
6438 	if (cpu == RING_BUFFER_ALL_CPUS) {
6439 		WARN_ON(!cpumask_full(tr->pipe_cpumask));
6440 		cpumask_clear(tr->pipe_cpumask);
6441 	} else {
6442 		WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6443 		cpumask_clear_cpu(cpu, tr->pipe_cpumask);
6444 	}
6445 }
6446 
6447 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6448 {
6449 	struct trace_array *tr = inode->i_private;
6450 	struct trace_iterator *iter;
6451 	int cpu;
6452 	int ret;
6453 
6454 	ret = tracing_check_open_get_tr(tr);
6455 	if (ret)
6456 		return ret;
6457 
6458 	mutex_lock(&trace_types_lock);
6459 	cpu = tracing_get_cpu(inode);
6460 	ret = open_pipe_on_cpu(tr, cpu);
6461 	if (ret)
6462 		goto fail_pipe_on_cpu;
6463 
6464 	/* create a buffer to store the information to pass to userspace */
6465 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6466 	if (!iter) {
6467 		ret = -ENOMEM;
6468 		goto fail_alloc_iter;
6469 	}
6470 
6471 	trace_seq_init(&iter->seq);
6472 	iter->trace = tr->current_trace;
6473 
6474 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6475 		ret = -ENOMEM;
6476 		goto fail;
6477 	}
6478 
6479 	/* trace pipe does not show start of buffer */
6480 	cpumask_setall(iter->started);
6481 
6482 	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6483 		iter->iter_flags |= TRACE_FILE_LAT_FMT;
6484 
6485 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
6486 	if (trace_clocks[tr->clock_id].in_ns)
6487 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6488 
6489 	iter->tr = tr;
6490 	iter->array_buffer = &tr->array_buffer;
6491 	iter->cpu_file = cpu;
6492 	mutex_init(&iter->mutex);
6493 	filp->private_data = iter;
6494 
6495 	if (iter->trace->pipe_open)
6496 		iter->trace->pipe_open(iter);
6497 
6498 	nonseekable_open(inode, filp);
6499 
6500 	tr->trace_ref++;
6501 
6502 	mutex_unlock(&trace_types_lock);
6503 	return ret;
6504 
6505 fail:
6506 	kfree(iter);
6507 fail_alloc_iter:
6508 	close_pipe_on_cpu(tr, cpu);
6509 fail_pipe_on_cpu:
6510 	__trace_array_put(tr);
6511 	mutex_unlock(&trace_types_lock);
6512 	return ret;
6513 }
6514 
6515 static int tracing_release_pipe(struct inode *inode, struct file *file)
6516 {
6517 	struct trace_iterator *iter = file->private_data;
6518 	struct trace_array *tr = inode->i_private;
6519 
6520 	mutex_lock(&trace_types_lock);
6521 
6522 	tr->trace_ref--;
6523 
6524 	if (iter->trace->pipe_close)
6525 		iter->trace->pipe_close(iter);
6526 	close_pipe_on_cpu(tr, iter->cpu_file);
6527 	mutex_unlock(&trace_types_lock);
6528 
6529 	free_trace_iter_content(iter);
6530 	kfree(iter);
6531 
6532 	trace_array_put(tr);
6533 
6534 	return 0;
6535 }
6536 
6537 static __poll_t
6538 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6539 {
6540 	struct trace_array *tr = iter->tr;
6541 
6542 	/* Iterators are static, they should be filled or empty */
6543 	if (trace_buffer_iter(iter, iter->cpu_file))
6544 		return EPOLLIN | EPOLLRDNORM;
6545 
6546 	if (tr->trace_flags & TRACE_ITER_BLOCK)
6547 		/*
6548 		 * Always select as readable when in blocking mode
6549 		 */
6550 		return EPOLLIN | EPOLLRDNORM;
6551 	else
6552 		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6553 					     filp, poll_table, iter->tr->buffer_percent);
6554 }
6555 
6556 static __poll_t
6557 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6558 {
6559 	struct trace_iterator *iter = filp->private_data;
6560 
6561 	return trace_poll(iter, filp, poll_table);
6562 }
6563 
6564 /* Must be called with iter->mutex held. */
6565 static int tracing_wait_pipe(struct file *filp)
6566 {
6567 	struct trace_iterator *iter = filp->private_data;
6568 	int ret;
6569 
6570 	while (trace_empty(iter)) {
6571 
6572 		if ((filp->f_flags & O_NONBLOCK)) {
6573 			return -EAGAIN;
6574 		}
6575 
6576 		/*
6577 		 * We block until we read something and tracing is disabled.
6578 		 * We still block if tracing is disabled, but we have never
6579 		 * read anything. This allows a user to cat this file, and
6580 		 * then enable tracing. But after we have read something,
6581 		 * we give an EOF when tracing is again disabled.
6582 		 *
6583 		 * iter->pos will be 0 if we haven't read anything.
6584 		 */
6585 		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6586 			break;
6587 
6588 		mutex_unlock(&iter->mutex);
6589 
6590 		ret = wait_on_pipe(iter, 0);
6591 
6592 		mutex_lock(&iter->mutex);
6593 
6594 		if (ret)
6595 			return ret;
6596 	}
6597 
6598 	return 1;
6599 }
6600 
6601 /*
6602  * Consumer reader.
6603  */
6604 static ssize_t
6605 tracing_read_pipe(struct file *filp, char __user *ubuf,
6606 		  size_t cnt, loff_t *ppos)
6607 {
6608 	struct trace_iterator *iter = filp->private_data;
6609 	ssize_t sret;
6610 
6611 	/*
6612 	 * Avoid more than one consumer on a single file descriptor
6613 	 * This is just a matter of traces coherency, the ring buffer itself
6614 	 * is protected.
6615 	 */
6616 	mutex_lock(&iter->mutex);
6617 
6618 	/* return any leftover data */
6619 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6620 	if (sret != -EBUSY)
6621 		goto out;
6622 
6623 	trace_seq_init(&iter->seq);
6624 
6625 	if (iter->trace->read) {
6626 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6627 		if (sret)
6628 			goto out;
6629 	}
6630 
6631 waitagain:
6632 	sret = tracing_wait_pipe(filp);
6633 	if (sret <= 0)
6634 		goto out;
6635 
6636 	/* stop when tracing is finished */
6637 	if (trace_empty(iter)) {
6638 		sret = 0;
6639 		goto out;
6640 	}
6641 
6642 	if (cnt >= TRACE_SEQ_BUFFER_SIZE)
6643 		cnt = TRACE_SEQ_BUFFER_SIZE - 1;
6644 
6645 	/* reset all but tr, trace, and overruns */
6646 	trace_iterator_reset(iter);
6647 	cpumask_clear(iter->started);
6648 	trace_seq_init(&iter->seq);
6649 
6650 	trace_event_read_lock();
6651 	trace_access_lock(iter->cpu_file);
6652 	while (trace_find_next_entry_inc(iter) != NULL) {
6653 		enum print_line_t ret;
6654 		int save_len = iter->seq.seq.len;
6655 
6656 		ret = print_trace_line(iter);
6657 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6658 			/*
6659 			 * If one print_trace_line() fills entire trace_seq in one shot,
6660 			 * trace_seq_to_user() will returns -EBUSY because save_len == 0,
6661 			 * In this case, we need to consume it, otherwise, loop will peek
6662 			 * this event next time, resulting in an infinite loop.
6663 			 */
6664 			if (save_len == 0) {
6665 				iter->seq.full = 0;
6666 				trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6667 				trace_consume(iter);
6668 				break;
6669 			}
6670 
6671 			/* In other cases, don't print partial lines */
6672 			iter->seq.seq.len = save_len;
6673 			break;
6674 		}
6675 		if (ret != TRACE_TYPE_NO_CONSUME)
6676 			trace_consume(iter);
6677 
6678 		if (trace_seq_used(&iter->seq) >= cnt)
6679 			break;
6680 
6681 		/*
6682 		 * Setting the full flag means we reached the trace_seq buffer
6683 		 * size and we should leave by partial output condition above.
6684 		 * One of the trace_seq_* functions is not used properly.
6685 		 */
6686 		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6687 			  iter->ent->type);
6688 	}
6689 	trace_access_unlock(iter->cpu_file);
6690 	trace_event_read_unlock();
6691 
6692 	/* Now copy what we have to the user */
6693 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6694 	if (iter->seq.readpos >= trace_seq_used(&iter->seq))
6695 		trace_seq_init(&iter->seq);
6696 
6697 	/*
6698 	 * If there was nothing to send to user, in spite of consuming trace
6699 	 * entries, go back to wait for more entries.
6700 	 */
6701 	if (sret == -EBUSY)
6702 		goto waitagain;
6703 
6704 out:
6705 	mutex_unlock(&iter->mutex);
6706 
6707 	return sret;
6708 }
6709 
6710 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6711 				     unsigned int idx)
6712 {
6713 	__free_page(spd->pages[idx]);
6714 }
6715 
6716 static size_t
6717 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6718 {
6719 	size_t count;
6720 	int save_len;
6721 	int ret;
6722 
6723 	/* Seq buffer is page-sized, exactly what we need. */
6724 	for (;;) {
6725 		save_len = iter->seq.seq.len;
6726 		ret = print_trace_line(iter);
6727 
6728 		if (trace_seq_has_overflowed(&iter->seq)) {
6729 			iter->seq.seq.len = save_len;
6730 			break;
6731 		}
6732 
6733 		/*
6734 		 * This should not be hit, because it should only
6735 		 * be set if the iter->seq overflowed. But check it
6736 		 * anyway to be safe.
6737 		 */
6738 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6739 			iter->seq.seq.len = save_len;
6740 			break;
6741 		}
6742 
6743 		count = trace_seq_used(&iter->seq) - save_len;
6744 		if (rem < count) {
6745 			rem = 0;
6746 			iter->seq.seq.len = save_len;
6747 			break;
6748 		}
6749 
6750 		if (ret != TRACE_TYPE_NO_CONSUME)
6751 			trace_consume(iter);
6752 		rem -= count;
6753 		if (!trace_find_next_entry_inc(iter))	{
6754 			rem = 0;
6755 			iter->ent = NULL;
6756 			break;
6757 		}
6758 	}
6759 
6760 	return rem;
6761 }
6762 
6763 static ssize_t tracing_splice_read_pipe(struct file *filp,
6764 					loff_t *ppos,
6765 					struct pipe_inode_info *pipe,
6766 					size_t len,
6767 					unsigned int flags)
6768 {
6769 	struct page *pages_def[PIPE_DEF_BUFFERS];
6770 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
6771 	struct trace_iterator *iter = filp->private_data;
6772 	struct splice_pipe_desc spd = {
6773 		.pages		= pages_def,
6774 		.partial	= partial_def,
6775 		.nr_pages	= 0, /* This gets updated below. */
6776 		.nr_pages_max	= PIPE_DEF_BUFFERS,
6777 		.ops		= &default_pipe_buf_ops,
6778 		.spd_release	= tracing_spd_release_pipe,
6779 	};
6780 	ssize_t ret;
6781 	size_t rem;
6782 	unsigned int i;
6783 
6784 	if (splice_grow_spd(pipe, &spd))
6785 		return -ENOMEM;
6786 
6787 	mutex_lock(&iter->mutex);
6788 
6789 	if (iter->trace->splice_read) {
6790 		ret = iter->trace->splice_read(iter, filp,
6791 					       ppos, pipe, len, flags);
6792 		if (ret)
6793 			goto out_err;
6794 	}
6795 
6796 	ret = tracing_wait_pipe(filp);
6797 	if (ret <= 0)
6798 		goto out_err;
6799 
6800 	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6801 		ret = -EFAULT;
6802 		goto out_err;
6803 	}
6804 
6805 	trace_event_read_lock();
6806 	trace_access_lock(iter->cpu_file);
6807 
6808 	/* Fill as many pages as possible. */
6809 	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6810 		spd.pages[i] = alloc_page(GFP_KERNEL);
6811 		if (!spd.pages[i])
6812 			break;
6813 
6814 		rem = tracing_fill_pipe_page(rem, iter);
6815 
6816 		/* Copy the data into the page, so we can start over. */
6817 		ret = trace_seq_to_buffer(&iter->seq,
6818 					  page_address(spd.pages[i]),
6819 					  trace_seq_used(&iter->seq));
6820 		if (ret < 0) {
6821 			__free_page(spd.pages[i]);
6822 			break;
6823 		}
6824 		spd.partial[i].offset = 0;
6825 		spd.partial[i].len = trace_seq_used(&iter->seq);
6826 
6827 		trace_seq_init(&iter->seq);
6828 	}
6829 
6830 	trace_access_unlock(iter->cpu_file);
6831 	trace_event_read_unlock();
6832 	mutex_unlock(&iter->mutex);
6833 
6834 	spd.nr_pages = i;
6835 
6836 	if (i)
6837 		ret = splice_to_pipe(pipe, &spd);
6838 	else
6839 		ret = 0;
6840 out:
6841 	splice_shrink_spd(&spd);
6842 	return ret;
6843 
6844 out_err:
6845 	mutex_unlock(&iter->mutex);
6846 	goto out;
6847 }
6848 
6849 static ssize_t
6850 tracing_entries_read(struct file *filp, char __user *ubuf,
6851 		     size_t cnt, loff_t *ppos)
6852 {
6853 	struct inode *inode = file_inode(filp);
6854 	struct trace_array *tr = inode->i_private;
6855 	int cpu = tracing_get_cpu(inode);
6856 	char buf[64];
6857 	int r = 0;
6858 	ssize_t ret;
6859 
6860 	mutex_lock(&trace_types_lock);
6861 
6862 	if (cpu == RING_BUFFER_ALL_CPUS) {
6863 		int cpu, buf_size_same;
6864 		unsigned long size;
6865 
6866 		size = 0;
6867 		buf_size_same = 1;
6868 		/* check if all cpu sizes are same */
6869 		for_each_tracing_cpu(cpu) {
6870 			/* fill in the size from first enabled cpu */
6871 			if (size == 0)
6872 				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6873 			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6874 				buf_size_same = 0;
6875 				break;
6876 			}
6877 		}
6878 
6879 		if (buf_size_same) {
6880 			if (!tr->ring_buffer_expanded)
6881 				r = sprintf(buf, "%lu (expanded: %lu)\n",
6882 					    size >> 10,
6883 					    trace_buf_size >> 10);
6884 			else
6885 				r = sprintf(buf, "%lu\n", size >> 10);
6886 		} else
6887 			r = sprintf(buf, "X\n");
6888 	} else
6889 		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6890 
6891 	mutex_unlock(&trace_types_lock);
6892 
6893 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6894 	return ret;
6895 }
6896 
6897 static ssize_t
6898 tracing_entries_write(struct file *filp, const char __user *ubuf,
6899 		      size_t cnt, loff_t *ppos)
6900 {
6901 	struct inode *inode = file_inode(filp);
6902 	struct trace_array *tr = inode->i_private;
6903 	unsigned long val;
6904 	int ret;
6905 
6906 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6907 	if (ret)
6908 		return ret;
6909 
6910 	/* must have at least 1 entry */
6911 	if (!val)
6912 		return -EINVAL;
6913 
6914 	/* value is in KB */
6915 	val <<= 10;
6916 	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6917 	if (ret < 0)
6918 		return ret;
6919 
6920 	*ppos += cnt;
6921 
6922 	return cnt;
6923 }
6924 
6925 static ssize_t
6926 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6927 				size_t cnt, loff_t *ppos)
6928 {
6929 	struct trace_array *tr = filp->private_data;
6930 	char buf[64];
6931 	int r, cpu;
6932 	unsigned long size = 0, expanded_size = 0;
6933 
6934 	mutex_lock(&trace_types_lock);
6935 	for_each_tracing_cpu(cpu) {
6936 		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6937 		if (!tr->ring_buffer_expanded)
6938 			expanded_size += trace_buf_size >> 10;
6939 	}
6940 	if (tr->ring_buffer_expanded)
6941 		r = sprintf(buf, "%lu\n", size);
6942 	else
6943 		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6944 	mutex_unlock(&trace_types_lock);
6945 
6946 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6947 }
6948 
6949 static ssize_t
6950 tracing_last_boot_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
6951 {
6952 	struct trace_array *tr = filp->private_data;
6953 	struct seq_buf seq;
6954 	char buf[64];
6955 
6956 	seq_buf_init(&seq, buf, 64);
6957 
6958 	seq_buf_printf(&seq, "text delta:\t%ld\n", tr->text_delta);
6959 	seq_buf_printf(&seq, "data delta:\t%ld\n", tr->data_delta);
6960 
6961 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, seq_buf_used(&seq));
6962 }
6963 
6964 static int tracing_buffer_meta_open(struct inode *inode, struct file *filp)
6965 {
6966 	struct trace_array *tr = inode->i_private;
6967 	int cpu = tracing_get_cpu(inode);
6968 	int ret;
6969 
6970 	ret = tracing_check_open_get_tr(tr);
6971 	if (ret)
6972 		return ret;
6973 
6974 	ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu);
6975 	if (ret < 0)
6976 		__trace_array_put(tr);
6977 	return ret;
6978 }
6979 
6980 static ssize_t
6981 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6982 			  size_t cnt, loff_t *ppos)
6983 {
6984 	/*
6985 	 * There is no need to read what the user has written, this function
6986 	 * is just to make sure that there is no error when "echo" is used
6987 	 */
6988 
6989 	*ppos += cnt;
6990 
6991 	return cnt;
6992 }
6993 
6994 static int
6995 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6996 {
6997 	struct trace_array *tr = inode->i_private;
6998 
6999 	/* disable tracing ? */
7000 	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7001 		tracer_tracing_off(tr);
7002 	/* resize the ring buffer to 0 */
7003 	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7004 
7005 	trace_array_put(tr);
7006 
7007 	return 0;
7008 }
7009 
7010 #define TRACE_MARKER_MAX_SIZE		4096
7011 
7012 static ssize_t
7013 tracing_mark_write(struct file *filp, const char __user *ubuf,
7014 					size_t cnt, loff_t *fpos)
7015 {
7016 	struct trace_array *tr = filp->private_data;
7017 	struct ring_buffer_event *event;
7018 	enum event_trigger_type tt = ETT_NONE;
7019 	struct trace_buffer *buffer;
7020 	struct print_entry *entry;
7021 	int meta_size;
7022 	ssize_t written;
7023 	size_t size;
7024 	int len;
7025 
7026 /* Used in tracing_mark_raw_write() as well */
7027 #define FAULTED_STR "<faulted>"
7028 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7029 
7030 	if (tracing_disabled)
7031 		return -EINVAL;
7032 
7033 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7034 		return -EINVAL;
7035 
7036 	if ((ssize_t)cnt < 0)
7037 		return -EINVAL;
7038 
7039 	if (cnt > TRACE_MARKER_MAX_SIZE)
7040 		cnt = TRACE_MARKER_MAX_SIZE;
7041 
7042 	meta_size = sizeof(*entry) + 2;  /* add '\0' and possible '\n' */
7043  again:
7044 	size = cnt + meta_size;
7045 
7046 	/* If less than "<faulted>", then make sure we can still add that */
7047 	if (cnt < FAULTED_SIZE)
7048 		size += FAULTED_SIZE - cnt;
7049 
7050 	buffer = tr->array_buffer.buffer;
7051 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7052 					    tracing_gen_ctx());
7053 	if (unlikely(!event)) {
7054 		/*
7055 		 * If the size was greater than what was allowed, then
7056 		 * make it smaller and try again.
7057 		 */
7058 		if (size > ring_buffer_max_event_size(buffer)) {
7059 			/* cnt < FAULTED size should never be bigger than max */
7060 			if (WARN_ON_ONCE(cnt < FAULTED_SIZE))
7061 				return -EBADF;
7062 			cnt = ring_buffer_max_event_size(buffer) - meta_size;
7063 			/* The above should only happen once */
7064 			if (WARN_ON_ONCE(cnt + meta_size == size))
7065 				return -EBADF;
7066 			goto again;
7067 		}
7068 
7069 		/* Ring buffer disabled, return as if not open for write */
7070 		return -EBADF;
7071 	}
7072 
7073 	entry = ring_buffer_event_data(event);
7074 	entry->ip = _THIS_IP_;
7075 
7076 	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7077 	if (len) {
7078 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7079 		cnt = FAULTED_SIZE;
7080 		written = -EFAULT;
7081 	} else
7082 		written = cnt;
7083 
7084 	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7085 		/* do not add \n before testing triggers, but add \0 */
7086 		entry->buf[cnt] = '\0';
7087 		tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7088 	}
7089 
7090 	if (entry->buf[cnt - 1] != '\n') {
7091 		entry->buf[cnt] = '\n';
7092 		entry->buf[cnt + 1] = '\0';
7093 	} else
7094 		entry->buf[cnt] = '\0';
7095 
7096 	if (static_branch_unlikely(&trace_marker_exports_enabled))
7097 		ftrace_exports(event, TRACE_EXPORT_MARKER);
7098 	__buffer_unlock_commit(buffer, event);
7099 
7100 	if (tt)
7101 		event_triggers_post_call(tr->trace_marker_file, tt);
7102 
7103 	return written;
7104 }
7105 
7106 static ssize_t
7107 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7108 					size_t cnt, loff_t *fpos)
7109 {
7110 	struct trace_array *tr = filp->private_data;
7111 	struct ring_buffer_event *event;
7112 	struct trace_buffer *buffer;
7113 	struct raw_data_entry *entry;
7114 	ssize_t written;
7115 	int size;
7116 	int len;
7117 
7118 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7119 
7120 	if (tracing_disabled)
7121 		return -EINVAL;
7122 
7123 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7124 		return -EINVAL;
7125 
7126 	/* The marker must at least have a tag id */
7127 	if (cnt < sizeof(unsigned int))
7128 		return -EINVAL;
7129 
7130 	size = sizeof(*entry) + cnt;
7131 	if (cnt < FAULT_SIZE_ID)
7132 		size += FAULT_SIZE_ID - cnt;
7133 
7134 	buffer = tr->array_buffer.buffer;
7135 
7136 	if (size > ring_buffer_max_event_size(buffer))
7137 		return -EINVAL;
7138 
7139 	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7140 					    tracing_gen_ctx());
7141 	if (!event)
7142 		/* Ring buffer disabled, return as if not open for write */
7143 		return -EBADF;
7144 
7145 	entry = ring_buffer_event_data(event);
7146 
7147 	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7148 	if (len) {
7149 		entry->id = -1;
7150 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7151 		written = -EFAULT;
7152 	} else
7153 		written = cnt;
7154 
7155 	__buffer_unlock_commit(buffer, event);
7156 
7157 	return written;
7158 }
7159 
7160 static int tracing_clock_show(struct seq_file *m, void *v)
7161 {
7162 	struct trace_array *tr = m->private;
7163 	int i;
7164 
7165 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7166 		seq_printf(m,
7167 			"%s%s%s%s", i ? " " : "",
7168 			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7169 			i == tr->clock_id ? "]" : "");
7170 	seq_putc(m, '\n');
7171 
7172 	return 0;
7173 }
7174 
7175 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7176 {
7177 	int i;
7178 
7179 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7180 		if (strcmp(trace_clocks[i].name, clockstr) == 0)
7181 			break;
7182 	}
7183 	if (i == ARRAY_SIZE(trace_clocks))
7184 		return -EINVAL;
7185 
7186 	mutex_lock(&trace_types_lock);
7187 
7188 	tr->clock_id = i;
7189 
7190 	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7191 
7192 	/*
7193 	 * New clock may not be consistent with the previous clock.
7194 	 * Reset the buffer so that it doesn't have incomparable timestamps.
7195 	 */
7196 	tracing_reset_online_cpus(&tr->array_buffer);
7197 
7198 #ifdef CONFIG_TRACER_MAX_TRACE
7199 	if (tr->max_buffer.buffer)
7200 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7201 	tracing_reset_online_cpus(&tr->max_buffer);
7202 #endif
7203 
7204 	mutex_unlock(&trace_types_lock);
7205 
7206 	return 0;
7207 }
7208 
7209 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7210 				   size_t cnt, loff_t *fpos)
7211 {
7212 	struct seq_file *m = filp->private_data;
7213 	struct trace_array *tr = m->private;
7214 	char buf[64];
7215 	const char *clockstr;
7216 	int ret;
7217 
7218 	if (cnt >= sizeof(buf))
7219 		return -EINVAL;
7220 
7221 	if (copy_from_user(buf, ubuf, cnt))
7222 		return -EFAULT;
7223 
7224 	buf[cnt] = 0;
7225 
7226 	clockstr = strstrip(buf);
7227 
7228 	ret = tracing_set_clock(tr, clockstr);
7229 	if (ret)
7230 		return ret;
7231 
7232 	*fpos += cnt;
7233 
7234 	return cnt;
7235 }
7236 
7237 static int tracing_clock_open(struct inode *inode, struct file *file)
7238 {
7239 	struct trace_array *tr = inode->i_private;
7240 	int ret;
7241 
7242 	ret = tracing_check_open_get_tr(tr);
7243 	if (ret)
7244 		return ret;
7245 
7246 	ret = single_open(file, tracing_clock_show, inode->i_private);
7247 	if (ret < 0)
7248 		trace_array_put(tr);
7249 
7250 	return ret;
7251 }
7252 
7253 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7254 {
7255 	struct trace_array *tr = m->private;
7256 
7257 	mutex_lock(&trace_types_lock);
7258 
7259 	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7260 		seq_puts(m, "delta [absolute]\n");
7261 	else
7262 		seq_puts(m, "[delta] absolute\n");
7263 
7264 	mutex_unlock(&trace_types_lock);
7265 
7266 	return 0;
7267 }
7268 
7269 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7270 {
7271 	struct trace_array *tr = inode->i_private;
7272 	int ret;
7273 
7274 	ret = tracing_check_open_get_tr(tr);
7275 	if (ret)
7276 		return ret;
7277 
7278 	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7279 	if (ret < 0)
7280 		trace_array_put(tr);
7281 
7282 	return ret;
7283 }
7284 
7285 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7286 {
7287 	if (rbe == this_cpu_read(trace_buffered_event))
7288 		return ring_buffer_time_stamp(buffer);
7289 
7290 	return ring_buffer_event_time_stamp(buffer, rbe);
7291 }
7292 
7293 /*
7294  * Set or disable using the per CPU trace_buffer_event when possible.
7295  */
7296 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7297 {
7298 	int ret = 0;
7299 
7300 	mutex_lock(&trace_types_lock);
7301 
7302 	if (set && tr->no_filter_buffering_ref++)
7303 		goto out;
7304 
7305 	if (!set) {
7306 		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7307 			ret = -EINVAL;
7308 			goto out;
7309 		}
7310 
7311 		--tr->no_filter_buffering_ref;
7312 	}
7313  out:
7314 	mutex_unlock(&trace_types_lock);
7315 
7316 	return ret;
7317 }
7318 
7319 struct ftrace_buffer_info {
7320 	struct trace_iterator	iter;
7321 	void			*spare;
7322 	unsigned int		spare_cpu;
7323 	unsigned int		spare_size;
7324 	unsigned int		read;
7325 };
7326 
7327 #ifdef CONFIG_TRACER_SNAPSHOT
7328 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7329 {
7330 	struct trace_array *tr = inode->i_private;
7331 	struct trace_iterator *iter;
7332 	struct seq_file *m;
7333 	int ret;
7334 
7335 	ret = tracing_check_open_get_tr(tr);
7336 	if (ret)
7337 		return ret;
7338 
7339 	if (file->f_mode & FMODE_READ) {
7340 		iter = __tracing_open(inode, file, true);
7341 		if (IS_ERR(iter))
7342 			ret = PTR_ERR(iter);
7343 	} else {
7344 		/* Writes still need the seq_file to hold the private data */
7345 		ret = -ENOMEM;
7346 		m = kzalloc(sizeof(*m), GFP_KERNEL);
7347 		if (!m)
7348 			goto out;
7349 		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7350 		if (!iter) {
7351 			kfree(m);
7352 			goto out;
7353 		}
7354 		ret = 0;
7355 
7356 		iter->tr = tr;
7357 		iter->array_buffer = &tr->max_buffer;
7358 		iter->cpu_file = tracing_get_cpu(inode);
7359 		m->private = iter;
7360 		file->private_data = m;
7361 	}
7362 out:
7363 	if (ret < 0)
7364 		trace_array_put(tr);
7365 
7366 	return ret;
7367 }
7368 
7369 static void tracing_swap_cpu_buffer(void *tr)
7370 {
7371 	update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7372 }
7373 
7374 static ssize_t
7375 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7376 		       loff_t *ppos)
7377 {
7378 	struct seq_file *m = filp->private_data;
7379 	struct trace_iterator *iter = m->private;
7380 	struct trace_array *tr = iter->tr;
7381 	unsigned long val;
7382 	int ret;
7383 
7384 	ret = tracing_update_buffers(tr);
7385 	if (ret < 0)
7386 		return ret;
7387 
7388 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7389 	if (ret)
7390 		return ret;
7391 
7392 	mutex_lock(&trace_types_lock);
7393 
7394 	if (tr->current_trace->use_max_tr) {
7395 		ret = -EBUSY;
7396 		goto out;
7397 	}
7398 
7399 	local_irq_disable();
7400 	arch_spin_lock(&tr->max_lock);
7401 	if (tr->cond_snapshot)
7402 		ret = -EBUSY;
7403 	arch_spin_unlock(&tr->max_lock);
7404 	local_irq_enable();
7405 	if (ret)
7406 		goto out;
7407 
7408 	switch (val) {
7409 	case 0:
7410 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7411 			ret = -EINVAL;
7412 			break;
7413 		}
7414 		if (tr->allocated_snapshot)
7415 			free_snapshot(tr);
7416 		break;
7417 	case 1:
7418 /* Only allow per-cpu swap if the ring buffer supports it */
7419 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7420 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7421 			ret = -EINVAL;
7422 			break;
7423 		}
7424 #endif
7425 		if (tr->allocated_snapshot)
7426 			ret = resize_buffer_duplicate_size(&tr->max_buffer,
7427 					&tr->array_buffer, iter->cpu_file);
7428 
7429 		ret = tracing_arm_snapshot_locked(tr);
7430 		if (ret)
7431 			break;
7432 
7433 		/* Now, we're going to swap */
7434 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7435 			local_irq_disable();
7436 			update_max_tr(tr, current, smp_processor_id(), NULL);
7437 			local_irq_enable();
7438 		} else {
7439 			smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7440 						 (void *)tr, 1);
7441 		}
7442 		tracing_disarm_snapshot(tr);
7443 		break;
7444 	default:
7445 		if (tr->allocated_snapshot) {
7446 			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7447 				tracing_reset_online_cpus(&tr->max_buffer);
7448 			else
7449 				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7450 		}
7451 		break;
7452 	}
7453 
7454 	if (ret >= 0) {
7455 		*ppos += cnt;
7456 		ret = cnt;
7457 	}
7458 out:
7459 	mutex_unlock(&trace_types_lock);
7460 	return ret;
7461 }
7462 
7463 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7464 {
7465 	struct seq_file *m = file->private_data;
7466 	int ret;
7467 
7468 	ret = tracing_release(inode, file);
7469 
7470 	if (file->f_mode & FMODE_READ)
7471 		return ret;
7472 
7473 	/* If write only, the seq_file is just a stub */
7474 	if (m)
7475 		kfree(m->private);
7476 	kfree(m);
7477 
7478 	return 0;
7479 }
7480 
7481 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7482 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7483 				    size_t count, loff_t *ppos);
7484 static int tracing_buffers_release(struct inode *inode, struct file *file);
7485 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7486 		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7487 
7488 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7489 {
7490 	struct ftrace_buffer_info *info;
7491 	int ret;
7492 
7493 	/* The following checks for tracefs lockdown */
7494 	ret = tracing_buffers_open(inode, filp);
7495 	if (ret < 0)
7496 		return ret;
7497 
7498 	info = filp->private_data;
7499 
7500 	if (info->iter.trace->use_max_tr) {
7501 		tracing_buffers_release(inode, filp);
7502 		return -EBUSY;
7503 	}
7504 
7505 	info->iter.snapshot = true;
7506 	info->iter.array_buffer = &info->iter.tr->max_buffer;
7507 
7508 	return ret;
7509 }
7510 
7511 #endif /* CONFIG_TRACER_SNAPSHOT */
7512 
7513 
7514 static const struct file_operations tracing_thresh_fops = {
7515 	.open		= tracing_open_generic,
7516 	.read		= tracing_thresh_read,
7517 	.write		= tracing_thresh_write,
7518 	.llseek		= generic_file_llseek,
7519 };
7520 
7521 #ifdef CONFIG_TRACER_MAX_TRACE
7522 static const struct file_operations tracing_max_lat_fops = {
7523 	.open		= tracing_open_generic_tr,
7524 	.read		= tracing_max_lat_read,
7525 	.write		= tracing_max_lat_write,
7526 	.llseek		= generic_file_llseek,
7527 	.release	= tracing_release_generic_tr,
7528 };
7529 #endif
7530 
7531 static const struct file_operations set_tracer_fops = {
7532 	.open		= tracing_open_generic_tr,
7533 	.read		= tracing_set_trace_read,
7534 	.write		= tracing_set_trace_write,
7535 	.llseek		= generic_file_llseek,
7536 	.release	= tracing_release_generic_tr,
7537 };
7538 
7539 static const struct file_operations tracing_pipe_fops = {
7540 	.open		= tracing_open_pipe,
7541 	.poll		= tracing_poll_pipe,
7542 	.read		= tracing_read_pipe,
7543 	.splice_read	= tracing_splice_read_pipe,
7544 	.release	= tracing_release_pipe,
7545 };
7546 
7547 static const struct file_operations tracing_entries_fops = {
7548 	.open		= tracing_open_generic_tr,
7549 	.read		= tracing_entries_read,
7550 	.write		= tracing_entries_write,
7551 	.llseek		= generic_file_llseek,
7552 	.release	= tracing_release_generic_tr,
7553 };
7554 
7555 static const struct file_operations tracing_buffer_meta_fops = {
7556 	.open		= tracing_buffer_meta_open,
7557 	.read		= seq_read,
7558 	.llseek		= seq_lseek,
7559 	.release	= tracing_seq_release,
7560 };
7561 
7562 static const struct file_operations tracing_total_entries_fops = {
7563 	.open		= tracing_open_generic_tr,
7564 	.read		= tracing_total_entries_read,
7565 	.llseek		= generic_file_llseek,
7566 	.release	= tracing_release_generic_tr,
7567 };
7568 
7569 static const struct file_operations tracing_free_buffer_fops = {
7570 	.open		= tracing_open_generic_tr,
7571 	.write		= tracing_free_buffer_write,
7572 	.release	= tracing_free_buffer_release,
7573 };
7574 
7575 static const struct file_operations tracing_mark_fops = {
7576 	.open		= tracing_mark_open,
7577 	.write		= tracing_mark_write,
7578 	.release	= tracing_release_generic_tr,
7579 };
7580 
7581 static const struct file_operations tracing_mark_raw_fops = {
7582 	.open		= tracing_mark_open,
7583 	.write		= tracing_mark_raw_write,
7584 	.release	= tracing_release_generic_tr,
7585 };
7586 
7587 static const struct file_operations trace_clock_fops = {
7588 	.open		= tracing_clock_open,
7589 	.read		= seq_read,
7590 	.llseek		= seq_lseek,
7591 	.release	= tracing_single_release_tr,
7592 	.write		= tracing_clock_write,
7593 };
7594 
7595 static const struct file_operations trace_time_stamp_mode_fops = {
7596 	.open		= tracing_time_stamp_mode_open,
7597 	.read		= seq_read,
7598 	.llseek		= seq_lseek,
7599 	.release	= tracing_single_release_tr,
7600 };
7601 
7602 static const struct file_operations last_boot_fops = {
7603 	.open		= tracing_open_generic_tr,
7604 	.read		= tracing_last_boot_read,
7605 	.llseek		= generic_file_llseek,
7606 	.release	= tracing_release_generic_tr,
7607 };
7608 
7609 #ifdef CONFIG_TRACER_SNAPSHOT
7610 static const struct file_operations snapshot_fops = {
7611 	.open		= tracing_snapshot_open,
7612 	.read		= seq_read,
7613 	.write		= tracing_snapshot_write,
7614 	.llseek		= tracing_lseek,
7615 	.release	= tracing_snapshot_release,
7616 };
7617 
7618 static const struct file_operations snapshot_raw_fops = {
7619 	.open		= snapshot_raw_open,
7620 	.read		= tracing_buffers_read,
7621 	.release	= tracing_buffers_release,
7622 	.splice_read	= tracing_buffers_splice_read,
7623 };
7624 
7625 #endif /* CONFIG_TRACER_SNAPSHOT */
7626 
7627 /*
7628  * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7629  * @filp: The active open file structure
7630  * @ubuf: The userspace provided buffer to read value into
7631  * @cnt: The maximum number of bytes to read
7632  * @ppos: The current "file" position
7633  *
7634  * This function implements the write interface for a struct trace_min_max_param.
7635  * The filp->private_data must point to a trace_min_max_param structure that
7636  * defines where to write the value, the min and the max acceptable values,
7637  * and a lock to protect the write.
7638  */
7639 static ssize_t
7640 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7641 {
7642 	struct trace_min_max_param *param = filp->private_data;
7643 	u64 val;
7644 	int err;
7645 
7646 	if (!param)
7647 		return -EFAULT;
7648 
7649 	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7650 	if (err)
7651 		return err;
7652 
7653 	if (param->lock)
7654 		mutex_lock(param->lock);
7655 
7656 	if (param->min && val < *param->min)
7657 		err = -EINVAL;
7658 
7659 	if (param->max && val > *param->max)
7660 		err = -EINVAL;
7661 
7662 	if (!err)
7663 		*param->val = val;
7664 
7665 	if (param->lock)
7666 		mutex_unlock(param->lock);
7667 
7668 	if (err)
7669 		return err;
7670 
7671 	return cnt;
7672 }
7673 
7674 /*
7675  * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7676  * @filp: The active open file structure
7677  * @ubuf: The userspace provided buffer to read value into
7678  * @cnt: The maximum number of bytes to read
7679  * @ppos: The current "file" position
7680  *
7681  * This function implements the read interface for a struct trace_min_max_param.
7682  * The filp->private_data must point to a trace_min_max_param struct with valid
7683  * data.
7684  */
7685 static ssize_t
7686 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7687 {
7688 	struct trace_min_max_param *param = filp->private_data;
7689 	char buf[U64_STR_SIZE];
7690 	int len;
7691 	u64 val;
7692 
7693 	if (!param)
7694 		return -EFAULT;
7695 
7696 	val = *param->val;
7697 
7698 	if (cnt > sizeof(buf))
7699 		cnt = sizeof(buf);
7700 
7701 	len = snprintf(buf, sizeof(buf), "%llu\n", val);
7702 
7703 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7704 }
7705 
7706 const struct file_operations trace_min_max_fops = {
7707 	.open		= tracing_open_generic,
7708 	.read		= trace_min_max_read,
7709 	.write		= trace_min_max_write,
7710 };
7711 
7712 #define TRACING_LOG_ERRS_MAX	8
7713 #define TRACING_LOG_LOC_MAX	128
7714 
7715 #define CMD_PREFIX "  Command: "
7716 
7717 struct err_info {
7718 	const char	**errs;	/* ptr to loc-specific array of err strings */
7719 	u8		type;	/* index into errs -> specific err string */
7720 	u16		pos;	/* caret position */
7721 	u64		ts;
7722 };
7723 
7724 struct tracing_log_err {
7725 	struct list_head	list;
7726 	struct err_info		info;
7727 	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
7728 	char			*cmd;                     /* what caused err */
7729 };
7730 
7731 static DEFINE_MUTEX(tracing_err_log_lock);
7732 
7733 static struct tracing_log_err *alloc_tracing_log_err(int len)
7734 {
7735 	struct tracing_log_err *err;
7736 
7737 	err = kzalloc(sizeof(*err), GFP_KERNEL);
7738 	if (!err)
7739 		return ERR_PTR(-ENOMEM);
7740 
7741 	err->cmd = kzalloc(len, GFP_KERNEL);
7742 	if (!err->cmd) {
7743 		kfree(err);
7744 		return ERR_PTR(-ENOMEM);
7745 	}
7746 
7747 	return err;
7748 }
7749 
7750 static void free_tracing_log_err(struct tracing_log_err *err)
7751 {
7752 	kfree(err->cmd);
7753 	kfree(err);
7754 }
7755 
7756 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7757 						   int len)
7758 {
7759 	struct tracing_log_err *err;
7760 	char *cmd;
7761 
7762 	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7763 		err = alloc_tracing_log_err(len);
7764 		if (PTR_ERR(err) != -ENOMEM)
7765 			tr->n_err_log_entries++;
7766 
7767 		return err;
7768 	}
7769 	cmd = kzalloc(len, GFP_KERNEL);
7770 	if (!cmd)
7771 		return ERR_PTR(-ENOMEM);
7772 	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7773 	kfree(err->cmd);
7774 	err->cmd = cmd;
7775 	list_del(&err->list);
7776 
7777 	return err;
7778 }
7779 
7780 /**
7781  * err_pos - find the position of a string within a command for error careting
7782  * @cmd: The tracing command that caused the error
7783  * @str: The string to position the caret at within @cmd
7784  *
7785  * Finds the position of the first occurrence of @str within @cmd.  The
7786  * return value can be passed to tracing_log_err() for caret placement
7787  * within @cmd.
7788  *
7789  * Returns the index within @cmd of the first occurrence of @str or 0
7790  * if @str was not found.
7791  */
7792 unsigned int err_pos(char *cmd, const char *str)
7793 {
7794 	char *found;
7795 
7796 	if (WARN_ON(!strlen(cmd)))
7797 		return 0;
7798 
7799 	found = strstr(cmd, str);
7800 	if (found)
7801 		return found - cmd;
7802 
7803 	return 0;
7804 }
7805 
7806 /**
7807  * tracing_log_err - write an error to the tracing error log
7808  * @tr: The associated trace array for the error (NULL for top level array)
7809  * @loc: A string describing where the error occurred
7810  * @cmd: The tracing command that caused the error
7811  * @errs: The array of loc-specific static error strings
7812  * @type: The index into errs[], which produces the specific static err string
7813  * @pos: The position the caret should be placed in the cmd
7814  *
7815  * Writes an error into tracing/error_log of the form:
7816  *
7817  * <loc>: error: <text>
7818  *   Command: <cmd>
7819  *              ^
7820  *
7821  * tracing/error_log is a small log file containing the last
7822  * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
7823  * unless there has been a tracing error, and the error log can be
7824  * cleared and have its memory freed by writing the empty string in
7825  * truncation mode to it i.e. echo > tracing/error_log.
7826  *
7827  * NOTE: the @errs array along with the @type param are used to
7828  * produce a static error string - this string is not copied and saved
7829  * when the error is logged - only a pointer to it is saved.  See
7830  * existing callers for examples of how static strings are typically
7831  * defined for use with tracing_log_err().
7832  */
7833 void tracing_log_err(struct trace_array *tr,
7834 		     const char *loc, const char *cmd,
7835 		     const char **errs, u8 type, u16 pos)
7836 {
7837 	struct tracing_log_err *err;
7838 	int len = 0;
7839 
7840 	if (!tr)
7841 		tr = &global_trace;
7842 
7843 	len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
7844 
7845 	mutex_lock(&tracing_err_log_lock);
7846 	err = get_tracing_log_err(tr, len);
7847 	if (PTR_ERR(err) == -ENOMEM) {
7848 		mutex_unlock(&tracing_err_log_lock);
7849 		return;
7850 	}
7851 
7852 	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7853 	snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
7854 
7855 	err->info.errs = errs;
7856 	err->info.type = type;
7857 	err->info.pos = pos;
7858 	err->info.ts = local_clock();
7859 
7860 	list_add_tail(&err->list, &tr->err_log);
7861 	mutex_unlock(&tracing_err_log_lock);
7862 }
7863 
7864 static void clear_tracing_err_log(struct trace_array *tr)
7865 {
7866 	struct tracing_log_err *err, *next;
7867 
7868 	mutex_lock(&tracing_err_log_lock);
7869 	list_for_each_entry_safe(err, next, &tr->err_log, list) {
7870 		list_del(&err->list);
7871 		free_tracing_log_err(err);
7872 	}
7873 
7874 	tr->n_err_log_entries = 0;
7875 	mutex_unlock(&tracing_err_log_lock);
7876 }
7877 
7878 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7879 {
7880 	struct trace_array *tr = m->private;
7881 
7882 	mutex_lock(&tracing_err_log_lock);
7883 
7884 	return seq_list_start(&tr->err_log, *pos);
7885 }
7886 
7887 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7888 {
7889 	struct trace_array *tr = m->private;
7890 
7891 	return seq_list_next(v, &tr->err_log, pos);
7892 }
7893 
7894 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7895 {
7896 	mutex_unlock(&tracing_err_log_lock);
7897 }
7898 
7899 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
7900 {
7901 	u16 i;
7902 
7903 	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7904 		seq_putc(m, ' ');
7905 	for (i = 0; i < pos; i++)
7906 		seq_putc(m, ' ');
7907 	seq_puts(m, "^\n");
7908 }
7909 
7910 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7911 {
7912 	struct tracing_log_err *err = v;
7913 
7914 	if (err) {
7915 		const char *err_text = err->info.errs[err->info.type];
7916 		u64 sec = err->info.ts;
7917 		u32 nsec;
7918 
7919 		nsec = do_div(sec, NSEC_PER_SEC);
7920 		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7921 			   err->loc, err_text);
7922 		seq_printf(m, "%s", err->cmd);
7923 		tracing_err_log_show_pos(m, err->info.pos);
7924 	}
7925 
7926 	return 0;
7927 }
7928 
7929 static const struct seq_operations tracing_err_log_seq_ops = {
7930 	.start  = tracing_err_log_seq_start,
7931 	.next   = tracing_err_log_seq_next,
7932 	.stop   = tracing_err_log_seq_stop,
7933 	.show   = tracing_err_log_seq_show
7934 };
7935 
7936 static int tracing_err_log_open(struct inode *inode, struct file *file)
7937 {
7938 	struct trace_array *tr = inode->i_private;
7939 	int ret = 0;
7940 
7941 	ret = tracing_check_open_get_tr(tr);
7942 	if (ret)
7943 		return ret;
7944 
7945 	/* If this file was opened for write, then erase contents */
7946 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7947 		clear_tracing_err_log(tr);
7948 
7949 	if (file->f_mode & FMODE_READ) {
7950 		ret = seq_open(file, &tracing_err_log_seq_ops);
7951 		if (!ret) {
7952 			struct seq_file *m = file->private_data;
7953 			m->private = tr;
7954 		} else {
7955 			trace_array_put(tr);
7956 		}
7957 	}
7958 	return ret;
7959 }
7960 
7961 static ssize_t tracing_err_log_write(struct file *file,
7962 				     const char __user *buffer,
7963 				     size_t count, loff_t *ppos)
7964 {
7965 	return count;
7966 }
7967 
7968 static int tracing_err_log_release(struct inode *inode, struct file *file)
7969 {
7970 	struct trace_array *tr = inode->i_private;
7971 
7972 	trace_array_put(tr);
7973 
7974 	if (file->f_mode & FMODE_READ)
7975 		seq_release(inode, file);
7976 
7977 	return 0;
7978 }
7979 
7980 static const struct file_operations tracing_err_log_fops = {
7981 	.open           = tracing_err_log_open,
7982 	.write		= tracing_err_log_write,
7983 	.read           = seq_read,
7984 	.llseek         = tracing_lseek,
7985 	.release        = tracing_err_log_release,
7986 };
7987 
7988 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7989 {
7990 	struct trace_array *tr = inode->i_private;
7991 	struct ftrace_buffer_info *info;
7992 	int ret;
7993 
7994 	ret = tracing_check_open_get_tr(tr);
7995 	if (ret)
7996 		return ret;
7997 
7998 	info = kvzalloc(sizeof(*info), GFP_KERNEL);
7999 	if (!info) {
8000 		trace_array_put(tr);
8001 		return -ENOMEM;
8002 	}
8003 
8004 	mutex_lock(&trace_types_lock);
8005 
8006 	info->iter.tr		= tr;
8007 	info->iter.cpu_file	= tracing_get_cpu(inode);
8008 	info->iter.trace	= tr->current_trace;
8009 	info->iter.array_buffer = &tr->array_buffer;
8010 	info->spare		= NULL;
8011 	/* Force reading ring buffer for first read */
8012 	info->read		= (unsigned int)-1;
8013 
8014 	filp->private_data = info;
8015 
8016 	tr->trace_ref++;
8017 
8018 	mutex_unlock(&trace_types_lock);
8019 
8020 	ret = nonseekable_open(inode, filp);
8021 	if (ret < 0)
8022 		trace_array_put(tr);
8023 
8024 	return ret;
8025 }
8026 
8027 static __poll_t
8028 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8029 {
8030 	struct ftrace_buffer_info *info = filp->private_data;
8031 	struct trace_iterator *iter = &info->iter;
8032 
8033 	return trace_poll(iter, filp, poll_table);
8034 }
8035 
8036 static ssize_t
8037 tracing_buffers_read(struct file *filp, char __user *ubuf,
8038 		     size_t count, loff_t *ppos)
8039 {
8040 	struct ftrace_buffer_info *info = filp->private_data;
8041 	struct trace_iterator *iter = &info->iter;
8042 	void *trace_data;
8043 	int page_size;
8044 	ssize_t ret = 0;
8045 	ssize_t size;
8046 
8047 	if (!count)
8048 		return 0;
8049 
8050 #ifdef CONFIG_TRACER_MAX_TRACE
8051 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8052 		return -EBUSY;
8053 #endif
8054 
8055 	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
8056 
8057 	/* Make sure the spare matches the current sub buffer size */
8058 	if (info->spare) {
8059 		if (page_size != info->spare_size) {
8060 			ring_buffer_free_read_page(iter->array_buffer->buffer,
8061 						   info->spare_cpu, info->spare);
8062 			info->spare = NULL;
8063 		}
8064 	}
8065 
8066 	if (!info->spare) {
8067 		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8068 							  iter->cpu_file);
8069 		if (IS_ERR(info->spare)) {
8070 			ret = PTR_ERR(info->spare);
8071 			info->spare = NULL;
8072 		} else {
8073 			info->spare_cpu = iter->cpu_file;
8074 			info->spare_size = page_size;
8075 		}
8076 	}
8077 	if (!info->spare)
8078 		return ret;
8079 
8080 	/* Do we have previous read data to read? */
8081 	if (info->read < page_size)
8082 		goto read;
8083 
8084  again:
8085 	trace_access_lock(iter->cpu_file);
8086 	ret = ring_buffer_read_page(iter->array_buffer->buffer,
8087 				    info->spare,
8088 				    count,
8089 				    iter->cpu_file, 0);
8090 	trace_access_unlock(iter->cpu_file);
8091 
8092 	if (ret < 0) {
8093 		if (trace_empty(iter) && !iter->closed) {
8094 			if ((filp->f_flags & O_NONBLOCK))
8095 				return -EAGAIN;
8096 
8097 			ret = wait_on_pipe(iter, 0);
8098 			if (ret)
8099 				return ret;
8100 
8101 			goto again;
8102 		}
8103 		return 0;
8104 	}
8105 
8106 	info->read = 0;
8107  read:
8108 	size = page_size - info->read;
8109 	if (size > count)
8110 		size = count;
8111 	trace_data = ring_buffer_read_page_data(info->spare);
8112 	ret = copy_to_user(ubuf, trace_data + info->read, size);
8113 	if (ret == size)
8114 		return -EFAULT;
8115 
8116 	size -= ret;
8117 
8118 	*ppos += size;
8119 	info->read += size;
8120 
8121 	return size;
8122 }
8123 
8124 static int tracing_buffers_flush(struct file *file, fl_owner_t id)
8125 {
8126 	struct ftrace_buffer_info *info = file->private_data;
8127 	struct trace_iterator *iter = &info->iter;
8128 
8129 	iter->closed = true;
8130 	/* Make sure the waiters see the new wait_index */
8131 	(void)atomic_fetch_inc_release(&iter->wait_index);
8132 
8133 	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8134 
8135 	return 0;
8136 }
8137 
8138 static int tracing_buffers_release(struct inode *inode, struct file *file)
8139 {
8140 	struct ftrace_buffer_info *info = file->private_data;
8141 	struct trace_iterator *iter = &info->iter;
8142 
8143 	mutex_lock(&trace_types_lock);
8144 
8145 	iter->tr->trace_ref--;
8146 
8147 	__trace_array_put(iter->tr);
8148 
8149 	if (info->spare)
8150 		ring_buffer_free_read_page(iter->array_buffer->buffer,
8151 					   info->spare_cpu, info->spare);
8152 	kvfree(info);
8153 
8154 	mutex_unlock(&trace_types_lock);
8155 
8156 	return 0;
8157 }
8158 
8159 struct buffer_ref {
8160 	struct trace_buffer	*buffer;
8161 	void			*page;
8162 	int			cpu;
8163 	refcount_t		refcount;
8164 };
8165 
8166 static void buffer_ref_release(struct buffer_ref *ref)
8167 {
8168 	if (!refcount_dec_and_test(&ref->refcount))
8169 		return;
8170 	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8171 	kfree(ref);
8172 }
8173 
8174 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8175 				    struct pipe_buffer *buf)
8176 {
8177 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8178 
8179 	buffer_ref_release(ref);
8180 	buf->private = 0;
8181 }
8182 
8183 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8184 				struct pipe_buffer *buf)
8185 {
8186 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8187 
8188 	if (refcount_read(&ref->refcount) > INT_MAX/2)
8189 		return false;
8190 
8191 	refcount_inc(&ref->refcount);
8192 	return true;
8193 }
8194 
8195 /* Pipe buffer operations for a buffer. */
8196 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8197 	.release		= buffer_pipe_buf_release,
8198 	.get			= buffer_pipe_buf_get,
8199 };
8200 
8201 /*
8202  * Callback from splice_to_pipe(), if we need to release some pages
8203  * at the end of the spd in case we error'ed out in filling the pipe.
8204  */
8205 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8206 {
8207 	struct buffer_ref *ref =
8208 		(struct buffer_ref *)spd->partial[i].private;
8209 
8210 	buffer_ref_release(ref);
8211 	spd->partial[i].private = 0;
8212 }
8213 
8214 static ssize_t
8215 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8216 			    struct pipe_inode_info *pipe, size_t len,
8217 			    unsigned int flags)
8218 {
8219 	struct ftrace_buffer_info *info = file->private_data;
8220 	struct trace_iterator *iter = &info->iter;
8221 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
8222 	struct page *pages_def[PIPE_DEF_BUFFERS];
8223 	struct splice_pipe_desc spd = {
8224 		.pages		= pages_def,
8225 		.partial	= partial_def,
8226 		.nr_pages_max	= PIPE_DEF_BUFFERS,
8227 		.ops		= &buffer_pipe_buf_ops,
8228 		.spd_release	= buffer_spd_release,
8229 	};
8230 	struct buffer_ref *ref;
8231 	bool woken = false;
8232 	int page_size;
8233 	int entries, i;
8234 	ssize_t ret = 0;
8235 
8236 #ifdef CONFIG_TRACER_MAX_TRACE
8237 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8238 		return -EBUSY;
8239 #endif
8240 
8241 	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
8242 	if (*ppos & (page_size - 1))
8243 		return -EINVAL;
8244 
8245 	if (len & (page_size - 1)) {
8246 		if (len < page_size)
8247 			return -EINVAL;
8248 		len &= (~(page_size - 1));
8249 	}
8250 
8251 	if (splice_grow_spd(pipe, &spd))
8252 		return -ENOMEM;
8253 
8254  again:
8255 	trace_access_lock(iter->cpu_file);
8256 	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8257 
8258 	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) {
8259 		struct page *page;
8260 		int r;
8261 
8262 		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8263 		if (!ref) {
8264 			ret = -ENOMEM;
8265 			break;
8266 		}
8267 
8268 		refcount_set(&ref->refcount, 1);
8269 		ref->buffer = iter->array_buffer->buffer;
8270 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8271 		if (IS_ERR(ref->page)) {
8272 			ret = PTR_ERR(ref->page);
8273 			ref->page = NULL;
8274 			kfree(ref);
8275 			break;
8276 		}
8277 		ref->cpu = iter->cpu_file;
8278 
8279 		r = ring_buffer_read_page(ref->buffer, ref->page,
8280 					  len, iter->cpu_file, 1);
8281 		if (r < 0) {
8282 			ring_buffer_free_read_page(ref->buffer, ref->cpu,
8283 						   ref->page);
8284 			kfree(ref);
8285 			break;
8286 		}
8287 
8288 		page = virt_to_page(ring_buffer_read_page_data(ref->page));
8289 
8290 		spd.pages[i] = page;
8291 		spd.partial[i].len = page_size;
8292 		spd.partial[i].offset = 0;
8293 		spd.partial[i].private = (unsigned long)ref;
8294 		spd.nr_pages++;
8295 		*ppos += page_size;
8296 
8297 		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8298 	}
8299 
8300 	trace_access_unlock(iter->cpu_file);
8301 	spd.nr_pages = i;
8302 
8303 	/* did we read anything? */
8304 	if (!spd.nr_pages) {
8305 
8306 		if (ret)
8307 			goto out;
8308 
8309 		if (woken)
8310 			goto out;
8311 
8312 		ret = -EAGAIN;
8313 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8314 			goto out;
8315 
8316 		ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
8317 		if (ret)
8318 			goto out;
8319 
8320 		/* No need to wait after waking up when tracing is off */
8321 		if (!tracer_tracing_is_on(iter->tr))
8322 			goto out;
8323 
8324 		/* Iterate one more time to collect any new data then exit */
8325 		woken = true;
8326 
8327 		goto again;
8328 	}
8329 
8330 	ret = splice_to_pipe(pipe, &spd);
8331 out:
8332 	splice_shrink_spd(&spd);
8333 
8334 	return ret;
8335 }
8336 
8337 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8338 {
8339 	struct ftrace_buffer_info *info = file->private_data;
8340 	struct trace_iterator *iter = &info->iter;
8341 	int err;
8342 
8343 	if (cmd == TRACE_MMAP_IOCTL_GET_READER) {
8344 		if (!(file->f_flags & O_NONBLOCK)) {
8345 			err = ring_buffer_wait(iter->array_buffer->buffer,
8346 					       iter->cpu_file,
8347 					       iter->tr->buffer_percent,
8348 					       NULL, NULL);
8349 			if (err)
8350 				return err;
8351 		}
8352 
8353 		return ring_buffer_map_get_reader(iter->array_buffer->buffer,
8354 						  iter->cpu_file);
8355 	} else if (cmd) {
8356 		return -ENOTTY;
8357 	}
8358 
8359 	/*
8360 	 * An ioctl call with cmd 0 to the ring buffer file will wake up all
8361 	 * waiters
8362 	 */
8363 	mutex_lock(&trace_types_lock);
8364 
8365 	/* Make sure the waiters see the new wait_index */
8366 	(void)atomic_fetch_inc_release(&iter->wait_index);
8367 
8368 	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8369 
8370 	mutex_unlock(&trace_types_lock);
8371 	return 0;
8372 }
8373 
8374 #ifdef CONFIG_TRACER_MAX_TRACE
8375 static int get_snapshot_map(struct trace_array *tr)
8376 {
8377 	int err = 0;
8378 
8379 	/*
8380 	 * Called with mmap_lock held. lockdep would be unhappy if we would now
8381 	 * take trace_types_lock. Instead use the specific
8382 	 * snapshot_trigger_lock.
8383 	 */
8384 	spin_lock(&tr->snapshot_trigger_lock);
8385 
8386 	if (tr->snapshot || tr->mapped == UINT_MAX)
8387 		err = -EBUSY;
8388 	else
8389 		tr->mapped++;
8390 
8391 	spin_unlock(&tr->snapshot_trigger_lock);
8392 
8393 	/* Wait for update_max_tr() to observe iter->tr->mapped */
8394 	if (tr->mapped == 1)
8395 		synchronize_rcu();
8396 
8397 	return err;
8398 
8399 }
8400 static void put_snapshot_map(struct trace_array *tr)
8401 {
8402 	spin_lock(&tr->snapshot_trigger_lock);
8403 	if (!WARN_ON(!tr->mapped))
8404 		tr->mapped--;
8405 	spin_unlock(&tr->snapshot_trigger_lock);
8406 }
8407 #else
8408 static inline int get_snapshot_map(struct trace_array *tr) { return 0; }
8409 static inline void put_snapshot_map(struct trace_array *tr) { }
8410 #endif
8411 
8412 static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
8413 {
8414 	struct ftrace_buffer_info *info = vma->vm_file->private_data;
8415 	struct trace_iterator *iter = &info->iter;
8416 
8417 	WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file));
8418 	put_snapshot_map(iter->tr);
8419 }
8420 
8421 static const struct vm_operations_struct tracing_buffers_vmops = {
8422 	.close		= tracing_buffers_mmap_close,
8423 };
8424 
8425 static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
8426 {
8427 	struct ftrace_buffer_info *info = filp->private_data;
8428 	struct trace_iterator *iter = &info->iter;
8429 	int ret = 0;
8430 
8431 	ret = get_snapshot_map(iter->tr);
8432 	if (ret)
8433 		return ret;
8434 
8435 	ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma);
8436 	if (ret)
8437 		put_snapshot_map(iter->tr);
8438 
8439 	vma->vm_ops = &tracing_buffers_vmops;
8440 
8441 	return ret;
8442 }
8443 
8444 static const struct file_operations tracing_buffers_fops = {
8445 	.open		= tracing_buffers_open,
8446 	.read		= tracing_buffers_read,
8447 	.poll		= tracing_buffers_poll,
8448 	.release	= tracing_buffers_release,
8449 	.flush		= tracing_buffers_flush,
8450 	.splice_read	= tracing_buffers_splice_read,
8451 	.unlocked_ioctl = tracing_buffers_ioctl,
8452 	.mmap		= tracing_buffers_mmap,
8453 };
8454 
8455 static ssize_t
8456 tracing_stats_read(struct file *filp, char __user *ubuf,
8457 		   size_t count, loff_t *ppos)
8458 {
8459 	struct inode *inode = file_inode(filp);
8460 	struct trace_array *tr = inode->i_private;
8461 	struct array_buffer *trace_buf = &tr->array_buffer;
8462 	int cpu = tracing_get_cpu(inode);
8463 	struct trace_seq *s;
8464 	unsigned long cnt;
8465 	unsigned long long t;
8466 	unsigned long usec_rem;
8467 
8468 	s = kmalloc(sizeof(*s), GFP_KERNEL);
8469 	if (!s)
8470 		return -ENOMEM;
8471 
8472 	trace_seq_init(s);
8473 
8474 	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8475 	trace_seq_printf(s, "entries: %ld\n", cnt);
8476 
8477 	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8478 	trace_seq_printf(s, "overrun: %ld\n", cnt);
8479 
8480 	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8481 	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8482 
8483 	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8484 	trace_seq_printf(s, "bytes: %ld\n", cnt);
8485 
8486 	if (trace_clocks[tr->clock_id].in_ns) {
8487 		/* local or global for trace_clock */
8488 		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8489 		usec_rem = do_div(t, USEC_PER_SEC);
8490 		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8491 								t, usec_rem);
8492 
8493 		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8494 		usec_rem = do_div(t, USEC_PER_SEC);
8495 		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8496 	} else {
8497 		/* counter or tsc mode for trace_clock */
8498 		trace_seq_printf(s, "oldest event ts: %llu\n",
8499 				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8500 
8501 		trace_seq_printf(s, "now ts: %llu\n",
8502 				ring_buffer_time_stamp(trace_buf->buffer));
8503 	}
8504 
8505 	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8506 	trace_seq_printf(s, "dropped events: %ld\n", cnt);
8507 
8508 	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8509 	trace_seq_printf(s, "read events: %ld\n", cnt);
8510 
8511 	count = simple_read_from_buffer(ubuf, count, ppos,
8512 					s->buffer, trace_seq_used(s));
8513 
8514 	kfree(s);
8515 
8516 	return count;
8517 }
8518 
8519 static const struct file_operations tracing_stats_fops = {
8520 	.open		= tracing_open_generic_tr,
8521 	.read		= tracing_stats_read,
8522 	.llseek		= generic_file_llseek,
8523 	.release	= tracing_release_generic_tr,
8524 };
8525 
8526 #ifdef CONFIG_DYNAMIC_FTRACE
8527 
8528 static ssize_t
8529 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8530 		  size_t cnt, loff_t *ppos)
8531 {
8532 	ssize_t ret;
8533 	char *buf;
8534 	int r;
8535 
8536 	/* 256 should be plenty to hold the amount needed */
8537 	buf = kmalloc(256, GFP_KERNEL);
8538 	if (!buf)
8539 		return -ENOMEM;
8540 
8541 	r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8542 		      ftrace_update_tot_cnt,
8543 		      ftrace_number_of_pages,
8544 		      ftrace_number_of_groups);
8545 
8546 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8547 	kfree(buf);
8548 	return ret;
8549 }
8550 
8551 static const struct file_operations tracing_dyn_info_fops = {
8552 	.open		= tracing_open_generic,
8553 	.read		= tracing_read_dyn_info,
8554 	.llseek		= generic_file_llseek,
8555 };
8556 #endif /* CONFIG_DYNAMIC_FTRACE */
8557 
8558 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8559 static void
8560 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8561 		struct trace_array *tr, struct ftrace_probe_ops *ops,
8562 		void *data)
8563 {
8564 	tracing_snapshot_instance(tr);
8565 }
8566 
8567 static void
8568 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8569 		      struct trace_array *tr, struct ftrace_probe_ops *ops,
8570 		      void *data)
8571 {
8572 	struct ftrace_func_mapper *mapper = data;
8573 	long *count = NULL;
8574 
8575 	if (mapper)
8576 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8577 
8578 	if (count) {
8579 
8580 		if (*count <= 0)
8581 			return;
8582 
8583 		(*count)--;
8584 	}
8585 
8586 	tracing_snapshot_instance(tr);
8587 }
8588 
8589 static int
8590 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8591 		      struct ftrace_probe_ops *ops, void *data)
8592 {
8593 	struct ftrace_func_mapper *mapper = data;
8594 	long *count = NULL;
8595 
8596 	seq_printf(m, "%ps:", (void *)ip);
8597 
8598 	seq_puts(m, "snapshot");
8599 
8600 	if (mapper)
8601 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8602 
8603 	if (count)
8604 		seq_printf(m, ":count=%ld\n", *count);
8605 	else
8606 		seq_puts(m, ":unlimited\n");
8607 
8608 	return 0;
8609 }
8610 
8611 static int
8612 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8613 		     unsigned long ip, void *init_data, void **data)
8614 {
8615 	struct ftrace_func_mapper *mapper = *data;
8616 
8617 	if (!mapper) {
8618 		mapper = allocate_ftrace_func_mapper();
8619 		if (!mapper)
8620 			return -ENOMEM;
8621 		*data = mapper;
8622 	}
8623 
8624 	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8625 }
8626 
8627 static void
8628 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8629 		     unsigned long ip, void *data)
8630 {
8631 	struct ftrace_func_mapper *mapper = data;
8632 
8633 	if (!ip) {
8634 		if (!mapper)
8635 			return;
8636 		free_ftrace_func_mapper(mapper, NULL);
8637 		return;
8638 	}
8639 
8640 	ftrace_func_mapper_remove_ip(mapper, ip);
8641 }
8642 
8643 static struct ftrace_probe_ops snapshot_probe_ops = {
8644 	.func			= ftrace_snapshot,
8645 	.print			= ftrace_snapshot_print,
8646 };
8647 
8648 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8649 	.func			= ftrace_count_snapshot,
8650 	.print			= ftrace_snapshot_print,
8651 	.init			= ftrace_snapshot_init,
8652 	.free			= ftrace_snapshot_free,
8653 };
8654 
8655 static int
8656 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8657 			       char *glob, char *cmd, char *param, int enable)
8658 {
8659 	struct ftrace_probe_ops *ops;
8660 	void *count = (void *)-1;
8661 	char *number;
8662 	int ret;
8663 
8664 	if (!tr)
8665 		return -ENODEV;
8666 
8667 	/* hash funcs only work with set_ftrace_filter */
8668 	if (!enable)
8669 		return -EINVAL;
8670 
8671 	ops = param ? &snapshot_count_probe_ops :  &snapshot_probe_ops;
8672 
8673 	if (glob[0] == '!') {
8674 		ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
8675 		if (!ret)
8676 			tracing_disarm_snapshot(tr);
8677 
8678 		return ret;
8679 	}
8680 
8681 	if (!param)
8682 		goto out_reg;
8683 
8684 	number = strsep(&param, ":");
8685 
8686 	if (!strlen(number))
8687 		goto out_reg;
8688 
8689 	/*
8690 	 * We use the callback data field (which is a pointer)
8691 	 * as our counter.
8692 	 */
8693 	ret = kstrtoul(number, 0, (unsigned long *)&count);
8694 	if (ret)
8695 		return ret;
8696 
8697  out_reg:
8698 	ret = tracing_arm_snapshot(tr);
8699 	if (ret < 0)
8700 		goto out;
8701 
8702 	ret = register_ftrace_function_probe(glob, tr, ops, count);
8703 	if (ret < 0)
8704 		tracing_disarm_snapshot(tr);
8705  out:
8706 	return ret < 0 ? ret : 0;
8707 }
8708 
8709 static struct ftrace_func_command ftrace_snapshot_cmd = {
8710 	.name			= "snapshot",
8711 	.func			= ftrace_trace_snapshot_callback,
8712 };
8713 
8714 static __init int register_snapshot_cmd(void)
8715 {
8716 	return register_ftrace_command(&ftrace_snapshot_cmd);
8717 }
8718 #else
8719 static inline __init int register_snapshot_cmd(void) { return 0; }
8720 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8721 
8722 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8723 {
8724 	if (WARN_ON(!tr->dir))
8725 		return ERR_PTR(-ENODEV);
8726 
8727 	/* Top directory uses NULL as the parent */
8728 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8729 		return NULL;
8730 
8731 	/* All sub buffers have a descriptor */
8732 	return tr->dir;
8733 }
8734 
8735 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8736 {
8737 	struct dentry *d_tracer;
8738 
8739 	if (tr->percpu_dir)
8740 		return tr->percpu_dir;
8741 
8742 	d_tracer = tracing_get_dentry(tr);
8743 	if (IS_ERR(d_tracer))
8744 		return NULL;
8745 
8746 	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8747 
8748 	MEM_FAIL(!tr->percpu_dir,
8749 		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8750 
8751 	return tr->percpu_dir;
8752 }
8753 
8754 static struct dentry *
8755 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8756 		      void *data, long cpu, const struct file_operations *fops)
8757 {
8758 	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8759 
8760 	if (ret) /* See tracing_get_cpu() */
8761 		d_inode(ret)->i_cdev = (void *)(cpu + 1);
8762 	return ret;
8763 }
8764 
8765 static void
8766 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8767 {
8768 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8769 	struct dentry *d_cpu;
8770 	char cpu_dir[30]; /* 30 characters should be more than enough */
8771 
8772 	if (!d_percpu)
8773 		return;
8774 
8775 	snprintf(cpu_dir, 30, "cpu%ld", cpu);
8776 	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8777 	if (!d_cpu) {
8778 		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8779 		return;
8780 	}
8781 
8782 	/* per cpu trace_pipe */
8783 	trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8784 				tr, cpu, &tracing_pipe_fops);
8785 
8786 	/* per cpu trace */
8787 	trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8788 				tr, cpu, &tracing_fops);
8789 
8790 	trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8791 				tr, cpu, &tracing_buffers_fops);
8792 
8793 	trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8794 				tr, cpu, &tracing_stats_fops);
8795 
8796 	trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8797 				tr, cpu, &tracing_entries_fops);
8798 
8799 	if (tr->range_addr_start)
8800 		trace_create_cpu_file("buffer_meta", TRACE_MODE_READ, d_cpu,
8801 				      tr, cpu, &tracing_buffer_meta_fops);
8802 #ifdef CONFIG_TRACER_SNAPSHOT
8803 	if (!tr->range_addr_start) {
8804 		trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8805 				      tr, cpu, &snapshot_fops);
8806 
8807 		trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8808 				      tr, cpu, &snapshot_raw_fops);
8809 	}
8810 #endif
8811 }
8812 
8813 #ifdef CONFIG_FTRACE_SELFTEST
8814 /* Let selftest have access to static functions in this file */
8815 #include "trace_selftest.c"
8816 #endif
8817 
8818 static ssize_t
8819 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8820 			loff_t *ppos)
8821 {
8822 	struct trace_option_dentry *topt = filp->private_data;
8823 	char *buf;
8824 
8825 	if (topt->flags->val & topt->opt->bit)
8826 		buf = "1\n";
8827 	else
8828 		buf = "0\n";
8829 
8830 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8831 }
8832 
8833 static ssize_t
8834 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8835 			 loff_t *ppos)
8836 {
8837 	struct trace_option_dentry *topt = filp->private_data;
8838 	unsigned long val;
8839 	int ret;
8840 
8841 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8842 	if (ret)
8843 		return ret;
8844 
8845 	if (val != 0 && val != 1)
8846 		return -EINVAL;
8847 
8848 	if (!!(topt->flags->val & topt->opt->bit) != val) {
8849 		mutex_lock(&trace_types_lock);
8850 		ret = __set_tracer_option(topt->tr, topt->flags,
8851 					  topt->opt, !val);
8852 		mutex_unlock(&trace_types_lock);
8853 		if (ret)
8854 			return ret;
8855 	}
8856 
8857 	*ppos += cnt;
8858 
8859 	return cnt;
8860 }
8861 
8862 static int tracing_open_options(struct inode *inode, struct file *filp)
8863 {
8864 	struct trace_option_dentry *topt = inode->i_private;
8865 	int ret;
8866 
8867 	ret = tracing_check_open_get_tr(topt->tr);
8868 	if (ret)
8869 		return ret;
8870 
8871 	filp->private_data = inode->i_private;
8872 	return 0;
8873 }
8874 
8875 static int tracing_release_options(struct inode *inode, struct file *file)
8876 {
8877 	struct trace_option_dentry *topt = file->private_data;
8878 
8879 	trace_array_put(topt->tr);
8880 	return 0;
8881 }
8882 
8883 static const struct file_operations trace_options_fops = {
8884 	.open = tracing_open_options,
8885 	.read = trace_options_read,
8886 	.write = trace_options_write,
8887 	.llseek	= generic_file_llseek,
8888 	.release = tracing_release_options,
8889 };
8890 
8891 /*
8892  * In order to pass in both the trace_array descriptor as well as the index
8893  * to the flag that the trace option file represents, the trace_array
8894  * has a character array of trace_flags_index[], which holds the index
8895  * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8896  * The address of this character array is passed to the flag option file
8897  * read/write callbacks.
8898  *
8899  * In order to extract both the index and the trace_array descriptor,
8900  * get_tr_index() uses the following algorithm.
8901  *
8902  *   idx = *ptr;
8903  *
8904  * As the pointer itself contains the address of the index (remember
8905  * index[1] == 1).
8906  *
8907  * Then to get the trace_array descriptor, by subtracting that index
8908  * from the ptr, we get to the start of the index itself.
8909  *
8910  *   ptr - idx == &index[0]
8911  *
8912  * Then a simple container_of() from that pointer gets us to the
8913  * trace_array descriptor.
8914  */
8915 static void get_tr_index(void *data, struct trace_array **ptr,
8916 			 unsigned int *pindex)
8917 {
8918 	*pindex = *(unsigned char *)data;
8919 
8920 	*ptr = container_of(data - *pindex, struct trace_array,
8921 			    trace_flags_index);
8922 }
8923 
8924 static ssize_t
8925 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8926 			loff_t *ppos)
8927 {
8928 	void *tr_index = filp->private_data;
8929 	struct trace_array *tr;
8930 	unsigned int index;
8931 	char *buf;
8932 
8933 	get_tr_index(tr_index, &tr, &index);
8934 
8935 	if (tr->trace_flags & (1 << index))
8936 		buf = "1\n";
8937 	else
8938 		buf = "0\n";
8939 
8940 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8941 }
8942 
8943 static ssize_t
8944 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8945 			 loff_t *ppos)
8946 {
8947 	void *tr_index = filp->private_data;
8948 	struct trace_array *tr;
8949 	unsigned int index;
8950 	unsigned long val;
8951 	int ret;
8952 
8953 	get_tr_index(tr_index, &tr, &index);
8954 
8955 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8956 	if (ret)
8957 		return ret;
8958 
8959 	if (val != 0 && val != 1)
8960 		return -EINVAL;
8961 
8962 	mutex_lock(&event_mutex);
8963 	mutex_lock(&trace_types_lock);
8964 	ret = set_tracer_flag(tr, 1 << index, val);
8965 	mutex_unlock(&trace_types_lock);
8966 	mutex_unlock(&event_mutex);
8967 
8968 	if (ret < 0)
8969 		return ret;
8970 
8971 	*ppos += cnt;
8972 
8973 	return cnt;
8974 }
8975 
8976 static const struct file_operations trace_options_core_fops = {
8977 	.open = tracing_open_generic,
8978 	.read = trace_options_core_read,
8979 	.write = trace_options_core_write,
8980 	.llseek = generic_file_llseek,
8981 };
8982 
8983 struct dentry *trace_create_file(const char *name,
8984 				 umode_t mode,
8985 				 struct dentry *parent,
8986 				 void *data,
8987 				 const struct file_operations *fops)
8988 {
8989 	struct dentry *ret;
8990 
8991 	ret = tracefs_create_file(name, mode, parent, data, fops);
8992 	if (!ret)
8993 		pr_warn("Could not create tracefs '%s' entry\n", name);
8994 
8995 	return ret;
8996 }
8997 
8998 
8999 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
9000 {
9001 	struct dentry *d_tracer;
9002 
9003 	if (tr->options)
9004 		return tr->options;
9005 
9006 	d_tracer = tracing_get_dentry(tr);
9007 	if (IS_ERR(d_tracer))
9008 		return NULL;
9009 
9010 	tr->options = tracefs_create_dir("options", d_tracer);
9011 	if (!tr->options) {
9012 		pr_warn("Could not create tracefs directory 'options'\n");
9013 		return NULL;
9014 	}
9015 
9016 	return tr->options;
9017 }
9018 
9019 static void
9020 create_trace_option_file(struct trace_array *tr,
9021 			 struct trace_option_dentry *topt,
9022 			 struct tracer_flags *flags,
9023 			 struct tracer_opt *opt)
9024 {
9025 	struct dentry *t_options;
9026 
9027 	t_options = trace_options_init_dentry(tr);
9028 	if (!t_options)
9029 		return;
9030 
9031 	topt->flags = flags;
9032 	topt->opt = opt;
9033 	topt->tr = tr;
9034 
9035 	topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
9036 					t_options, topt, &trace_options_fops);
9037 
9038 }
9039 
9040 static void
9041 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
9042 {
9043 	struct trace_option_dentry *topts;
9044 	struct trace_options *tr_topts;
9045 	struct tracer_flags *flags;
9046 	struct tracer_opt *opts;
9047 	int cnt;
9048 	int i;
9049 
9050 	if (!tracer)
9051 		return;
9052 
9053 	flags = tracer->flags;
9054 
9055 	if (!flags || !flags->opts)
9056 		return;
9057 
9058 	/*
9059 	 * If this is an instance, only create flags for tracers
9060 	 * the instance may have.
9061 	 */
9062 	if (!trace_ok_for_array(tracer, tr))
9063 		return;
9064 
9065 	for (i = 0; i < tr->nr_topts; i++) {
9066 		/* Make sure there's no duplicate flags. */
9067 		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9068 			return;
9069 	}
9070 
9071 	opts = flags->opts;
9072 
9073 	for (cnt = 0; opts[cnt].name; cnt++)
9074 		;
9075 
9076 	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
9077 	if (!topts)
9078 		return;
9079 
9080 	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9081 			    GFP_KERNEL);
9082 	if (!tr_topts) {
9083 		kfree(topts);
9084 		return;
9085 	}
9086 
9087 	tr->topts = tr_topts;
9088 	tr->topts[tr->nr_topts].tracer = tracer;
9089 	tr->topts[tr->nr_topts].topts = topts;
9090 	tr->nr_topts++;
9091 
9092 	for (cnt = 0; opts[cnt].name; cnt++) {
9093 		create_trace_option_file(tr, &topts[cnt], flags,
9094 					 &opts[cnt]);
9095 		MEM_FAIL(topts[cnt].entry == NULL,
9096 			  "Failed to create trace option: %s",
9097 			  opts[cnt].name);
9098 	}
9099 }
9100 
9101 static struct dentry *
9102 create_trace_option_core_file(struct trace_array *tr,
9103 			      const char *option, long index)
9104 {
9105 	struct dentry *t_options;
9106 
9107 	t_options = trace_options_init_dentry(tr);
9108 	if (!t_options)
9109 		return NULL;
9110 
9111 	return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9112 				 (void *)&tr->trace_flags_index[index],
9113 				 &trace_options_core_fops);
9114 }
9115 
9116 static void create_trace_options_dir(struct trace_array *tr)
9117 {
9118 	struct dentry *t_options;
9119 	bool top_level = tr == &global_trace;
9120 	int i;
9121 
9122 	t_options = trace_options_init_dentry(tr);
9123 	if (!t_options)
9124 		return;
9125 
9126 	for (i = 0; trace_options[i]; i++) {
9127 		if (top_level ||
9128 		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9129 			create_trace_option_core_file(tr, trace_options[i], i);
9130 	}
9131 }
9132 
9133 static ssize_t
9134 rb_simple_read(struct file *filp, char __user *ubuf,
9135 	       size_t cnt, loff_t *ppos)
9136 {
9137 	struct trace_array *tr = filp->private_data;
9138 	char buf[64];
9139 	int r;
9140 
9141 	r = tracer_tracing_is_on(tr);
9142 	r = sprintf(buf, "%d\n", r);
9143 
9144 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9145 }
9146 
9147 static ssize_t
9148 rb_simple_write(struct file *filp, const char __user *ubuf,
9149 		size_t cnt, loff_t *ppos)
9150 {
9151 	struct trace_array *tr = filp->private_data;
9152 	struct trace_buffer *buffer = tr->array_buffer.buffer;
9153 	unsigned long val;
9154 	int ret;
9155 
9156 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9157 	if (ret)
9158 		return ret;
9159 
9160 	if (buffer) {
9161 		mutex_lock(&trace_types_lock);
9162 		if (!!val == tracer_tracing_is_on(tr)) {
9163 			val = 0; /* do nothing */
9164 		} else if (val) {
9165 			tracer_tracing_on(tr);
9166 			if (tr->current_trace->start)
9167 				tr->current_trace->start(tr);
9168 		} else {
9169 			tracer_tracing_off(tr);
9170 			if (tr->current_trace->stop)
9171 				tr->current_trace->stop(tr);
9172 			/* Wake up any waiters */
9173 			ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9174 		}
9175 		mutex_unlock(&trace_types_lock);
9176 	}
9177 
9178 	(*ppos)++;
9179 
9180 	return cnt;
9181 }
9182 
9183 static const struct file_operations rb_simple_fops = {
9184 	.open		= tracing_open_generic_tr,
9185 	.read		= rb_simple_read,
9186 	.write		= rb_simple_write,
9187 	.release	= tracing_release_generic_tr,
9188 	.llseek		= default_llseek,
9189 };
9190 
9191 static ssize_t
9192 buffer_percent_read(struct file *filp, char __user *ubuf,
9193 		    size_t cnt, loff_t *ppos)
9194 {
9195 	struct trace_array *tr = filp->private_data;
9196 	char buf[64];
9197 	int r;
9198 
9199 	r = tr->buffer_percent;
9200 	r = sprintf(buf, "%d\n", r);
9201 
9202 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9203 }
9204 
9205 static ssize_t
9206 buffer_percent_write(struct file *filp, const char __user *ubuf,
9207 		     size_t cnt, loff_t *ppos)
9208 {
9209 	struct trace_array *tr = filp->private_data;
9210 	unsigned long val;
9211 	int ret;
9212 
9213 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9214 	if (ret)
9215 		return ret;
9216 
9217 	if (val > 100)
9218 		return -EINVAL;
9219 
9220 	tr->buffer_percent = val;
9221 
9222 	(*ppos)++;
9223 
9224 	return cnt;
9225 }
9226 
9227 static const struct file_operations buffer_percent_fops = {
9228 	.open		= tracing_open_generic_tr,
9229 	.read		= buffer_percent_read,
9230 	.write		= buffer_percent_write,
9231 	.release	= tracing_release_generic_tr,
9232 	.llseek		= default_llseek,
9233 };
9234 
9235 static ssize_t
9236 buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
9237 {
9238 	struct trace_array *tr = filp->private_data;
9239 	size_t size;
9240 	char buf[64];
9241 	int order;
9242 	int r;
9243 
9244 	order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9245 	size = (PAGE_SIZE << order) / 1024;
9246 
9247 	r = sprintf(buf, "%zd\n", size);
9248 
9249 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9250 }
9251 
9252 static ssize_t
9253 buffer_subbuf_size_write(struct file *filp, const char __user *ubuf,
9254 			 size_t cnt, loff_t *ppos)
9255 {
9256 	struct trace_array *tr = filp->private_data;
9257 	unsigned long val;
9258 	int old_order;
9259 	int order;
9260 	int pages;
9261 	int ret;
9262 
9263 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9264 	if (ret)
9265 		return ret;
9266 
9267 	val *= 1024; /* value passed in is in KB */
9268 
9269 	pages = DIV_ROUND_UP(val, PAGE_SIZE);
9270 	order = fls(pages - 1);
9271 
9272 	/* limit between 1 and 128 system pages */
9273 	if (order < 0 || order > 7)
9274 		return -EINVAL;
9275 
9276 	/* Do not allow tracing while changing the order of the ring buffer */
9277 	tracing_stop_tr(tr);
9278 
9279 	old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9280 	if (old_order == order)
9281 		goto out;
9282 
9283 	ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
9284 	if (ret)
9285 		goto out;
9286 
9287 #ifdef CONFIG_TRACER_MAX_TRACE
9288 
9289 	if (!tr->allocated_snapshot)
9290 		goto out_max;
9291 
9292 	ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
9293 	if (ret) {
9294 		/* Put back the old order */
9295 		cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
9296 		if (WARN_ON_ONCE(cnt)) {
9297 			/*
9298 			 * AARGH! We are left with different orders!
9299 			 * The max buffer is our "snapshot" buffer.
9300 			 * When a tracer needs a snapshot (one of the
9301 			 * latency tracers), it swaps the max buffer
9302 			 * with the saved snap shot. We succeeded to
9303 			 * update the order of the main buffer, but failed to
9304 			 * update the order of the max buffer. But when we tried
9305 			 * to reset the main buffer to the original size, we
9306 			 * failed there too. This is very unlikely to
9307 			 * happen, but if it does, warn and kill all
9308 			 * tracing.
9309 			 */
9310 			tracing_disabled = 1;
9311 		}
9312 		goto out;
9313 	}
9314  out_max:
9315 #endif
9316 	(*ppos)++;
9317  out:
9318 	if (ret)
9319 		cnt = ret;
9320 	tracing_start_tr(tr);
9321 	return cnt;
9322 }
9323 
9324 static const struct file_operations buffer_subbuf_size_fops = {
9325 	.open		= tracing_open_generic_tr,
9326 	.read		= buffer_subbuf_size_read,
9327 	.write		= buffer_subbuf_size_write,
9328 	.release	= tracing_release_generic_tr,
9329 	.llseek		= default_llseek,
9330 };
9331 
9332 static struct dentry *trace_instance_dir;
9333 
9334 static void
9335 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9336 
9337 static int
9338 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9339 {
9340 	enum ring_buffer_flags rb_flags;
9341 
9342 	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9343 
9344 	buf->tr = tr;
9345 
9346 	if (tr->range_addr_start && tr->range_addr_size) {
9347 		buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
9348 						      tr->range_addr_start,
9349 						      tr->range_addr_size);
9350 
9351 		ring_buffer_last_boot_delta(buf->buffer,
9352 					    &tr->text_delta, &tr->data_delta);
9353 		/*
9354 		 * This is basically the same as a mapped buffer,
9355 		 * with the same restrictions.
9356 		 */
9357 		tr->mapped++;
9358 	} else {
9359 		buf->buffer = ring_buffer_alloc(size, rb_flags);
9360 	}
9361 	if (!buf->buffer)
9362 		return -ENOMEM;
9363 
9364 	buf->data = alloc_percpu(struct trace_array_cpu);
9365 	if (!buf->data) {
9366 		ring_buffer_free(buf->buffer);
9367 		buf->buffer = NULL;
9368 		return -ENOMEM;
9369 	}
9370 
9371 	/* Allocate the first page for all buffers */
9372 	set_buffer_entries(&tr->array_buffer,
9373 			   ring_buffer_size(tr->array_buffer.buffer, 0));
9374 
9375 	return 0;
9376 }
9377 
9378 static void free_trace_buffer(struct array_buffer *buf)
9379 {
9380 	if (buf->buffer) {
9381 		ring_buffer_free(buf->buffer);
9382 		buf->buffer = NULL;
9383 		free_percpu(buf->data);
9384 		buf->data = NULL;
9385 	}
9386 }
9387 
9388 static int allocate_trace_buffers(struct trace_array *tr, int size)
9389 {
9390 	int ret;
9391 
9392 	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9393 	if (ret)
9394 		return ret;
9395 
9396 #ifdef CONFIG_TRACER_MAX_TRACE
9397 	/* Fix mapped buffer trace arrays do not have snapshot buffers */
9398 	if (tr->range_addr_start)
9399 		return 0;
9400 
9401 	ret = allocate_trace_buffer(tr, &tr->max_buffer,
9402 				    allocate_snapshot ? size : 1);
9403 	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9404 		free_trace_buffer(&tr->array_buffer);
9405 		return -ENOMEM;
9406 	}
9407 	tr->allocated_snapshot = allocate_snapshot;
9408 
9409 	allocate_snapshot = false;
9410 #endif
9411 
9412 	return 0;
9413 }
9414 
9415 static void free_trace_buffers(struct trace_array *tr)
9416 {
9417 	if (!tr)
9418 		return;
9419 
9420 	free_trace_buffer(&tr->array_buffer);
9421 
9422 #ifdef CONFIG_TRACER_MAX_TRACE
9423 	free_trace_buffer(&tr->max_buffer);
9424 #endif
9425 }
9426 
9427 static void init_trace_flags_index(struct trace_array *tr)
9428 {
9429 	int i;
9430 
9431 	/* Used by the trace options files */
9432 	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9433 		tr->trace_flags_index[i] = i;
9434 }
9435 
9436 static void __update_tracer_options(struct trace_array *tr)
9437 {
9438 	struct tracer *t;
9439 
9440 	for (t = trace_types; t; t = t->next)
9441 		add_tracer_options(tr, t);
9442 }
9443 
9444 static void update_tracer_options(struct trace_array *tr)
9445 {
9446 	mutex_lock(&trace_types_lock);
9447 	tracer_options_updated = true;
9448 	__update_tracer_options(tr);
9449 	mutex_unlock(&trace_types_lock);
9450 }
9451 
9452 /* Must have trace_types_lock held */
9453 struct trace_array *trace_array_find(const char *instance)
9454 {
9455 	struct trace_array *tr, *found = NULL;
9456 
9457 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9458 		if (tr->name && strcmp(tr->name, instance) == 0) {
9459 			found = tr;
9460 			break;
9461 		}
9462 	}
9463 
9464 	return found;
9465 }
9466 
9467 struct trace_array *trace_array_find_get(const char *instance)
9468 {
9469 	struct trace_array *tr;
9470 
9471 	mutex_lock(&trace_types_lock);
9472 	tr = trace_array_find(instance);
9473 	if (tr)
9474 		tr->ref++;
9475 	mutex_unlock(&trace_types_lock);
9476 
9477 	return tr;
9478 }
9479 
9480 static int trace_array_create_dir(struct trace_array *tr)
9481 {
9482 	int ret;
9483 
9484 	tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9485 	if (!tr->dir)
9486 		return -EINVAL;
9487 
9488 	ret = event_trace_add_tracer(tr->dir, tr);
9489 	if (ret) {
9490 		tracefs_remove(tr->dir);
9491 		return ret;
9492 	}
9493 
9494 	init_tracer_tracefs(tr, tr->dir);
9495 	__update_tracer_options(tr);
9496 
9497 	return ret;
9498 }
9499 
9500 static struct trace_array *
9501 trace_array_create_systems(const char *name, const char *systems,
9502 			   unsigned long range_addr_start,
9503 			   unsigned long range_addr_size)
9504 {
9505 	struct trace_array *tr;
9506 	int ret;
9507 
9508 	ret = -ENOMEM;
9509 	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9510 	if (!tr)
9511 		return ERR_PTR(ret);
9512 
9513 	tr->name = kstrdup(name, GFP_KERNEL);
9514 	if (!tr->name)
9515 		goto out_free_tr;
9516 
9517 	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9518 		goto out_free_tr;
9519 
9520 	if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9521 		goto out_free_tr;
9522 
9523 	if (systems) {
9524 		tr->system_names = kstrdup_const(systems, GFP_KERNEL);
9525 		if (!tr->system_names)
9526 			goto out_free_tr;
9527 	}
9528 
9529 	/* Only for boot up memory mapped ring buffers */
9530 	tr->range_addr_start = range_addr_start;
9531 	tr->range_addr_size = range_addr_size;
9532 
9533 	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9534 
9535 	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9536 
9537 	raw_spin_lock_init(&tr->start_lock);
9538 
9539 	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9540 #ifdef CONFIG_TRACER_MAX_TRACE
9541 	spin_lock_init(&tr->snapshot_trigger_lock);
9542 #endif
9543 	tr->current_trace = &nop_trace;
9544 
9545 	INIT_LIST_HEAD(&tr->systems);
9546 	INIT_LIST_HEAD(&tr->events);
9547 	INIT_LIST_HEAD(&tr->hist_vars);
9548 	INIT_LIST_HEAD(&tr->err_log);
9549 
9550 	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9551 		goto out_free_tr;
9552 
9553 	/* The ring buffer is defaultly expanded */
9554 	trace_set_ring_buffer_expanded(tr);
9555 
9556 	if (ftrace_allocate_ftrace_ops(tr) < 0)
9557 		goto out_free_tr;
9558 
9559 	ftrace_init_trace_array(tr);
9560 
9561 	init_trace_flags_index(tr);
9562 
9563 	if (trace_instance_dir) {
9564 		ret = trace_array_create_dir(tr);
9565 		if (ret)
9566 			goto out_free_tr;
9567 	} else
9568 		__trace_early_add_events(tr);
9569 
9570 	list_add(&tr->list, &ftrace_trace_arrays);
9571 
9572 	tr->ref++;
9573 
9574 	return tr;
9575 
9576  out_free_tr:
9577 	ftrace_free_ftrace_ops(tr);
9578 	free_trace_buffers(tr);
9579 	free_cpumask_var(tr->pipe_cpumask);
9580 	free_cpumask_var(tr->tracing_cpumask);
9581 	kfree_const(tr->system_names);
9582 	kfree(tr->name);
9583 	kfree(tr);
9584 
9585 	return ERR_PTR(ret);
9586 }
9587 
9588 static struct trace_array *trace_array_create(const char *name)
9589 {
9590 	return trace_array_create_systems(name, NULL, 0, 0);
9591 }
9592 
9593 static int instance_mkdir(const char *name)
9594 {
9595 	struct trace_array *tr;
9596 	int ret;
9597 
9598 	mutex_lock(&event_mutex);
9599 	mutex_lock(&trace_types_lock);
9600 
9601 	ret = -EEXIST;
9602 	if (trace_array_find(name))
9603 		goto out_unlock;
9604 
9605 	tr = trace_array_create(name);
9606 
9607 	ret = PTR_ERR_OR_ZERO(tr);
9608 
9609 out_unlock:
9610 	mutex_unlock(&trace_types_lock);
9611 	mutex_unlock(&event_mutex);
9612 	return ret;
9613 }
9614 
9615 static u64 map_pages(u64 start, u64 size)
9616 {
9617 	struct page **pages;
9618 	phys_addr_t page_start;
9619 	unsigned int page_count;
9620 	unsigned int i;
9621 	void *vaddr;
9622 
9623 	page_count = DIV_ROUND_UP(size, PAGE_SIZE);
9624 
9625 	page_start = start;
9626 	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
9627 	if (!pages)
9628 		return 0;
9629 
9630 	for (i = 0; i < page_count; i++) {
9631 		phys_addr_t addr = page_start + i * PAGE_SIZE;
9632 		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
9633 	}
9634 	vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
9635 	kfree(pages);
9636 
9637 	return (u64)(unsigned long)vaddr;
9638 }
9639 
9640 /**
9641  * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9642  * @name: The name of the trace array to be looked up/created.
9643  * @systems: A list of systems to create event directories for (NULL for all)
9644  *
9645  * Returns pointer to trace array with given name.
9646  * NULL, if it cannot be created.
9647  *
9648  * NOTE: This function increments the reference counter associated with the
9649  * trace array returned. This makes sure it cannot be freed while in use.
9650  * Use trace_array_put() once the trace array is no longer needed.
9651  * If the trace_array is to be freed, trace_array_destroy() needs to
9652  * be called after the trace_array_put(), or simply let user space delete
9653  * it from the tracefs instances directory. But until the
9654  * trace_array_put() is called, user space can not delete it.
9655  *
9656  */
9657 struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
9658 {
9659 	struct trace_array *tr;
9660 
9661 	mutex_lock(&event_mutex);
9662 	mutex_lock(&trace_types_lock);
9663 
9664 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9665 		if (tr->name && strcmp(tr->name, name) == 0)
9666 			goto out_unlock;
9667 	}
9668 
9669 	tr = trace_array_create_systems(name, systems, 0, 0);
9670 
9671 	if (IS_ERR(tr))
9672 		tr = NULL;
9673 out_unlock:
9674 	if (tr)
9675 		tr->ref++;
9676 
9677 	mutex_unlock(&trace_types_lock);
9678 	mutex_unlock(&event_mutex);
9679 	return tr;
9680 }
9681 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
9682 
9683 static int __remove_instance(struct trace_array *tr)
9684 {
9685 	int i;
9686 
9687 	/* Reference counter for a newly created trace array = 1. */
9688 	if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9689 		return -EBUSY;
9690 
9691 	list_del(&tr->list);
9692 
9693 	/* Disable all the flags that were enabled coming in */
9694 	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9695 		if ((1 << i) & ZEROED_TRACE_FLAGS)
9696 			set_tracer_flag(tr, 1 << i, 0);
9697 	}
9698 
9699 	if (printk_trace == tr)
9700 		update_printk_trace(&global_trace);
9701 
9702 	tracing_set_nop(tr);
9703 	clear_ftrace_function_probes(tr);
9704 	event_trace_del_tracer(tr);
9705 	ftrace_clear_pids(tr);
9706 	ftrace_destroy_function_files(tr);
9707 	tracefs_remove(tr->dir);
9708 	free_percpu(tr->last_func_repeats);
9709 	free_trace_buffers(tr);
9710 	clear_tracing_err_log(tr);
9711 
9712 	for (i = 0; i < tr->nr_topts; i++) {
9713 		kfree(tr->topts[i].topts);
9714 	}
9715 	kfree(tr->topts);
9716 
9717 	free_cpumask_var(tr->pipe_cpumask);
9718 	free_cpumask_var(tr->tracing_cpumask);
9719 	kfree_const(tr->system_names);
9720 	kfree(tr->name);
9721 	kfree(tr);
9722 
9723 	return 0;
9724 }
9725 
9726 int trace_array_destroy(struct trace_array *this_tr)
9727 {
9728 	struct trace_array *tr;
9729 	int ret;
9730 
9731 	if (!this_tr)
9732 		return -EINVAL;
9733 
9734 	mutex_lock(&event_mutex);
9735 	mutex_lock(&trace_types_lock);
9736 
9737 	ret = -ENODEV;
9738 
9739 	/* Making sure trace array exists before destroying it. */
9740 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9741 		if (tr == this_tr) {
9742 			ret = __remove_instance(tr);
9743 			break;
9744 		}
9745 	}
9746 
9747 	mutex_unlock(&trace_types_lock);
9748 	mutex_unlock(&event_mutex);
9749 
9750 	return ret;
9751 }
9752 EXPORT_SYMBOL_GPL(trace_array_destroy);
9753 
9754 static int instance_rmdir(const char *name)
9755 {
9756 	struct trace_array *tr;
9757 	int ret;
9758 
9759 	mutex_lock(&event_mutex);
9760 	mutex_lock(&trace_types_lock);
9761 
9762 	ret = -ENODEV;
9763 	tr = trace_array_find(name);
9764 	if (tr)
9765 		ret = __remove_instance(tr);
9766 
9767 	mutex_unlock(&trace_types_lock);
9768 	mutex_unlock(&event_mutex);
9769 
9770 	return ret;
9771 }
9772 
9773 static __init void create_trace_instances(struct dentry *d_tracer)
9774 {
9775 	struct trace_array *tr;
9776 
9777 	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9778 							 instance_mkdir,
9779 							 instance_rmdir);
9780 	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9781 		return;
9782 
9783 	mutex_lock(&event_mutex);
9784 	mutex_lock(&trace_types_lock);
9785 
9786 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9787 		if (!tr->name)
9788 			continue;
9789 		if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9790 			     "Failed to create instance directory\n"))
9791 			break;
9792 	}
9793 
9794 	mutex_unlock(&trace_types_lock);
9795 	mutex_unlock(&event_mutex);
9796 }
9797 
9798 static void
9799 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9800 {
9801 	int cpu;
9802 
9803 	trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9804 			tr, &show_traces_fops);
9805 
9806 	trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9807 			tr, &set_tracer_fops);
9808 
9809 	trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9810 			  tr, &tracing_cpumask_fops);
9811 
9812 	trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9813 			  tr, &tracing_iter_fops);
9814 
9815 	trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9816 			  tr, &tracing_fops);
9817 
9818 	trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9819 			  tr, &tracing_pipe_fops);
9820 
9821 	trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9822 			  tr, &tracing_entries_fops);
9823 
9824 	trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9825 			  tr, &tracing_total_entries_fops);
9826 
9827 	trace_create_file("free_buffer", 0200, d_tracer,
9828 			  tr, &tracing_free_buffer_fops);
9829 
9830 	trace_create_file("trace_marker", 0220, d_tracer,
9831 			  tr, &tracing_mark_fops);
9832 
9833 	tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
9834 
9835 	trace_create_file("trace_marker_raw", 0220, d_tracer,
9836 			  tr, &tracing_mark_raw_fops);
9837 
9838 	trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9839 			  &trace_clock_fops);
9840 
9841 	trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9842 			  tr, &rb_simple_fops);
9843 
9844 	trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9845 			  &trace_time_stamp_mode_fops);
9846 
9847 	tr->buffer_percent = 50;
9848 
9849 	trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
9850 			tr, &buffer_percent_fops);
9851 
9852 	trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer,
9853 			  tr, &buffer_subbuf_size_fops);
9854 
9855 	create_trace_options_dir(tr);
9856 
9857 #ifdef CONFIG_TRACER_MAX_TRACE
9858 	trace_create_maxlat_file(tr, d_tracer);
9859 #endif
9860 
9861 	if (ftrace_create_function_files(tr, d_tracer))
9862 		MEM_FAIL(1, "Could not allocate function filter files");
9863 
9864 	if (tr->range_addr_start) {
9865 		trace_create_file("last_boot_info", TRACE_MODE_READ, d_tracer,
9866 				  tr, &last_boot_fops);
9867 #ifdef CONFIG_TRACER_SNAPSHOT
9868 	} else {
9869 		trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9870 				  tr, &snapshot_fops);
9871 #endif
9872 	}
9873 
9874 	trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9875 			  tr, &tracing_err_log_fops);
9876 
9877 	for_each_tracing_cpu(cpu)
9878 		tracing_init_tracefs_percpu(tr, cpu);
9879 
9880 	ftrace_init_tracefs(tr, d_tracer);
9881 }
9882 
9883 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
9884 {
9885 	struct vfsmount *mnt;
9886 	struct file_system_type *type;
9887 
9888 	/*
9889 	 * To maintain backward compatibility for tools that mount
9890 	 * debugfs to get to the tracing facility, tracefs is automatically
9891 	 * mounted to the debugfs/tracing directory.
9892 	 */
9893 	type = get_fs_type("tracefs");
9894 	if (!type)
9895 		return NULL;
9896 	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9897 	put_filesystem(type);
9898 	if (IS_ERR(mnt))
9899 		return NULL;
9900 	mntget(mnt);
9901 
9902 	return mnt;
9903 }
9904 
9905 /**
9906  * tracing_init_dentry - initialize top level trace array
9907  *
9908  * This is called when creating files or directories in the tracing
9909  * directory. It is called via fs_initcall() by any of the boot up code
9910  * and expects to return the dentry of the top level tracing directory.
9911  */
9912 int tracing_init_dentry(void)
9913 {
9914 	struct trace_array *tr = &global_trace;
9915 
9916 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
9917 		pr_warn("Tracing disabled due to lockdown\n");
9918 		return -EPERM;
9919 	}
9920 
9921 	/* The top level trace array uses  NULL as parent */
9922 	if (tr->dir)
9923 		return 0;
9924 
9925 	if (WARN_ON(!tracefs_initialized()))
9926 		return -ENODEV;
9927 
9928 	/*
9929 	 * As there may still be users that expect the tracing
9930 	 * files to exist in debugfs/tracing, we must automount
9931 	 * the tracefs file system there, so older tools still
9932 	 * work with the newer kernel.
9933 	 */
9934 	tr->dir = debugfs_create_automount("tracing", NULL,
9935 					   trace_automount, NULL);
9936 
9937 	return 0;
9938 }
9939 
9940 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9941 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9942 
9943 static struct workqueue_struct *eval_map_wq __initdata;
9944 static struct work_struct eval_map_work __initdata;
9945 static struct work_struct tracerfs_init_work __initdata;
9946 
9947 static void __init eval_map_work_func(struct work_struct *work)
9948 {
9949 	int len;
9950 
9951 	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9952 	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9953 }
9954 
9955 static int __init trace_eval_init(void)
9956 {
9957 	INIT_WORK(&eval_map_work, eval_map_work_func);
9958 
9959 	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9960 	if (!eval_map_wq) {
9961 		pr_err("Unable to allocate eval_map_wq\n");
9962 		/* Do work here */
9963 		eval_map_work_func(&eval_map_work);
9964 		return -ENOMEM;
9965 	}
9966 
9967 	queue_work(eval_map_wq, &eval_map_work);
9968 	return 0;
9969 }
9970 
9971 subsys_initcall(trace_eval_init);
9972 
9973 static int __init trace_eval_sync(void)
9974 {
9975 	/* Make sure the eval map updates are finished */
9976 	if (eval_map_wq)
9977 		destroy_workqueue(eval_map_wq);
9978 	return 0;
9979 }
9980 
9981 late_initcall_sync(trace_eval_sync);
9982 
9983 
9984 #ifdef CONFIG_MODULES
9985 static void trace_module_add_evals(struct module *mod)
9986 {
9987 	if (!mod->num_trace_evals)
9988 		return;
9989 
9990 	/*
9991 	 * Modules with bad taint do not have events created, do
9992 	 * not bother with enums either.
9993 	 */
9994 	if (trace_module_has_bad_taint(mod))
9995 		return;
9996 
9997 	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9998 }
9999 
10000 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
10001 static void trace_module_remove_evals(struct module *mod)
10002 {
10003 	union trace_eval_map_item *map;
10004 	union trace_eval_map_item **last = &trace_eval_maps;
10005 
10006 	if (!mod->num_trace_evals)
10007 		return;
10008 
10009 	mutex_lock(&trace_eval_mutex);
10010 
10011 	map = trace_eval_maps;
10012 
10013 	while (map) {
10014 		if (map->head.mod == mod)
10015 			break;
10016 		map = trace_eval_jmp_to_tail(map);
10017 		last = &map->tail.next;
10018 		map = map->tail.next;
10019 	}
10020 	if (!map)
10021 		goto out;
10022 
10023 	*last = trace_eval_jmp_to_tail(map)->tail.next;
10024 	kfree(map);
10025  out:
10026 	mutex_unlock(&trace_eval_mutex);
10027 }
10028 #else
10029 static inline void trace_module_remove_evals(struct module *mod) { }
10030 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
10031 
10032 static int trace_module_notify(struct notifier_block *self,
10033 			       unsigned long val, void *data)
10034 {
10035 	struct module *mod = data;
10036 
10037 	switch (val) {
10038 	case MODULE_STATE_COMING:
10039 		trace_module_add_evals(mod);
10040 		break;
10041 	case MODULE_STATE_GOING:
10042 		trace_module_remove_evals(mod);
10043 		break;
10044 	}
10045 
10046 	return NOTIFY_OK;
10047 }
10048 
10049 static struct notifier_block trace_module_nb = {
10050 	.notifier_call = trace_module_notify,
10051 	.priority = 0,
10052 };
10053 #endif /* CONFIG_MODULES */
10054 
10055 static __init void tracer_init_tracefs_work_func(struct work_struct *work)
10056 {
10057 
10058 	event_trace_init();
10059 
10060 	init_tracer_tracefs(&global_trace, NULL);
10061 	ftrace_init_tracefs_toplevel(&global_trace, NULL);
10062 
10063 	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
10064 			&global_trace, &tracing_thresh_fops);
10065 
10066 	trace_create_file("README", TRACE_MODE_READ, NULL,
10067 			NULL, &tracing_readme_fops);
10068 
10069 	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
10070 			NULL, &tracing_saved_cmdlines_fops);
10071 
10072 	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
10073 			  NULL, &tracing_saved_cmdlines_size_fops);
10074 
10075 	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
10076 			NULL, &tracing_saved_tgids_fops);
10077 
10078 	trace_create_eval_file(NULL);
10079 
10080 #ifdef CONFIG_MODULES
10081 	register_module_notifier(&trace_module_nb);
10082 #endif
10083 
10084 #ifdef CONFIG_DYNAMIC_FTRACE
10085 	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
10086 			NULL, &tracing_dyn_info_fops);
10087 #endif
10088 
10089 	create_trace_instances(NULL);
10090 
10091 	update_tracer_options(&global_trace);
10092 }
10093 
10094 static __init int tracer_init_tracefs(void)
10095 {
10096 	int ret;
10097 
10098 	trace_access_lock_init();
10099 
10100 	ret = tracing_init_dentry();
10101 	if (ret)
10102 		return 0;
10103 
10104 	if (eval_map_wq) {
10105 		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
10106 		queue_work(eval_map_wq, &tracerfs_init_work);
10107 	} else {
10108 		tracer_init_tracefs_work_func(NULL);
10109 	}
10110 
10111 	rv_init_interface();
10112 
10113 	return 0;
10114 }
10115 
10116 fs_initcall(tracer_init_tracefs);
10117 
10118 static int trace_die_panic_handler(struct notifier_block *self,
10119 				unsigned long ev, void *unused);
10120 
10121 static struct notifier_block trace_panic_notifier = {
10122 	.notifier_call = trace_die_panic_handler,
10123 	.priority = INT_MAX - 1,
10124 };
10125 
10126 static struct notifier_block trace_die_notifier = {
10127 	.notifier_call = trace_die_panic_handler,
10128 	.priority = INT_MAX - 1,
10129 };
10130 
10131 /*
10132  * The idea is to execute the following die/panic callback early, in order
10133  * to avoid showing irrelevant information in the trace (like other panic
10134  * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
10135  * warnings get disabled (to prevent potential log flooding).
10136  */
10137 static int trace_die_panic_handler(struct notifier_block *self,
10138 				unsigned long ev, void *unused)
10139 {
10140 	if (!ftrace_dump_on_oops_enabled())
10141 		return NOTIFY_DONE;
10142 
10143 	/* The die notifier requires DIE_OOPS to trigger */
10144 	if (self == &trace_die_notifier && ev != DIE_OOPS)
10145 		return NOTIFY_DONE;
10146 
10147 	ftrace_dump(DUMP_PARAM);
10148 
10149 	return NOTIFY_DONE;
10150 }
10151 
10152 /*
10153  * printk is set to max of 1024, we really don't need it that big.
10154  * Nothing should be printing 1000 characters anyway.
10155  */
10156 #define TRACE_MAX_PRINT		1000
10157 
10158 /*
10159  * Define here KERN_TRACE so that we have one place to modify
10160  * it if we decide to change what log level the ftrace dump
10161  * should be at.
10162  */
10163 #define KERN_TRACE		KERN_EMERG
10164 
10165 void
10166 trace_printk_seq(struct trace_seq *s)
10167 {
10168 	/* Probably should print a warning here. */
10169 	if (s->seq.len >= TRACE_MAX_PRINT)
10170 		s->seq.len = TRACE_MAX_PRINT;
10171 
10172 	/*
10173 	 * More paranoid code. Although the buffer size is set to
10174 	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
10175 	 * an extra layer of protection.
10176 	 */
10177 	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
10178 		s->seq.len = s->seq.size - 1;
10179 
10180 	/* should be zero ended, but we are paranoid. */
10181 	s->buffer[s->seq.len] = 0;
10182 
10183 	printk(KERN_TRACE "%s", s->buffer);
10184 
10185 	trace_seq_init(s);
10186 }
10187 
10188 static void trace_init_iter(struct trace_iterator *iter, struct trace_array *tr)
10189 {
10190 	iter->tr = tr;
10191 	iter->trace = iter->tr->current_trace;
10192 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
10193 	iter->array_buffer = &tr->array_buffer;
10194 
10195 	if (iter->trace && iter->trace->open)
10196 		iter->trace->open(iter);
10197 
10198 	/* Annotate start of buffers if we had overruns */
10199 	if (ring_buffer_overruns(iter->array_buffer->buffer))
10200 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
10201 
10202 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
10203 	if (trace_clocks[iter->tr->clock_id].in_ns)
10204 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
10205 
10206 	/* Can not use kmalloc for iter.temp and iter.fmt */
10207 	iter->temp = static_temp_buf;
10208 	iter->temp_size = STATIC_TEMP_BUF_SIZE;
10209 	iter->fmt = static_fmt_buf;
10210 	iter->fmt_size = STATIC_FMT_BUF_SIZE;
10211 }
10212 
10213 void trace_init_global_iter(struct trace_iterator *iter)
10214 {
10215 	trace_init_iter(iter, &global_trace);
10216 }
10217 
10218 static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_mode)
10219 {
10220 	/* use static because iter can be a bit big for the stack */
10221 	static struct trace_iterator iter;
10222 	unsigned int old_userobj;
10223 	unsigned long flags;
10224 	int cnt = 0, cpu;
10225 
10226 	/*
10227 	 * Always turn off tracing when we dump.
10228 	 * We don't need to show trace output of what happens
10229 	 * between multiple crashes.
10230 	 *
10231 	 * If the user does a sysrq-z, then they can re-enable
10232 	 * tracing with echo 1 > tracing_on.
10233 	 */
10234 	tracer_tracing_off(tr);
10235 
10236 	local_irq_save(flags);
10237 
10238 	/* Simulate the iterator */
10239 	trace_init_iter(&iter, tr);
10240 
10241 	for_each_tracing_cpu(cpu) {
10242 		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10243 	}
10244 
10245 	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
10246 
10247 	/* don't look at user memory in panic mode */
10248 	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
10249 
10250 	if (dump_mode == DUMP_ORIG)
10251 		iter.cpu_file = raw_smp_processor_id();
10252 	else
10253 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
10254 
10255 	if (tr == &global_trace)
10256 		printk(KERN_TRACE "Dumping ftrace buffer:\n");
10257 	else
10258 		printk(KERN_TRACE "Dumping ftrace instance %s buffer:\n", tr->name);
10259 
10260 	/* Did function tracer already get disabled? */
10261 	if (ftrace_is_dead()) {
10262 		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
10263 		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
10264 	}
10265 
10266 	/*
10267 	 * We need to stop all tracing on all CPUS to read
10268 	 * the next buffer. This is a bit expensive, but is
10269 	 * not done often. We fill all what we can read,
10270 	 * and then release the locks again.
10271 	 */
10272 
10273 	while (!trace_empty(&iter)) {
10274 
10275 		if (!cnt)
10276 			printk(KERN_TRACE "---------------------------------\n");
10277 
10278 		cnt++;
10279 
10280 		trace_iterator_reset(&iter);
10281 		iter.iter_flags |= TRACE_FILE_LAT_FMT;
10282 
10283 		if (trace_find_next_entry_inc(&iter) != NULL) {
10284 			int ret;
10285 
10286 			ret = print_trace_line(&iter);
10287 			if (ret != TRACE_TYPE_NO_CONSUME)
10288 				trace_consume(&iter);
10289 		}
10290 		touch_nmi_watchdog();
10291 
10292 		trace_printk_seq(&iter.seq);
10293 	}
10294 
10295 	if (!cnt)
10296 		printk(KERN_TRACE "   (ftrace buffer empty)\n");
10297 	else
10298 		printk(KERN_TRACE "---------------------------------\n");
10299 
10300 	tr->trace_flags |= old_userobj;
10301 
10302 	for_each_tracing_cpu(cpu) {
10303 		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10304 	}
10305 	local_irq_restore(flags);
10306 }
10307 
10308 static void ftrace_dump_by_param(void)
10309 {
10310 	bool first_param = true;
10311 	char dump_param[MAX_TRACER_SIZE];
10312 	char *buf, *token, *inst_name;
10313 	struct trace_array *tr;
10314 
10315 	strscpy(dump_param, ftrace_dump_on_oops, MAX_TRACER_SIZE);
10316 	buf = dump_param;
10317 
10318 	while ((token = strsep(&buf, ",")) != NULL) {
10319 		if (first_param) {
10320 			first_param = false;
10321 			if (!strcmp("0", token))
10322 				continue;
10323 			else if (!strcmp("1", token)) {
10324 				ftrace_dump_one(&global_trace, DUMP_ALL);
10325 				continue;
10326 			}
10327 			else if (!strcmp("2", token) ||
10328 			  !strcmp("orig_cpu", token)) {
10329 				ftrace_dump_one(&global_trace, DUMP_ORIG);
10330 				continue;
10331 			}
10332 		}
10333 
10334 		inst_name = strsep(&token, "=");
10335 		tr = trace_array_find(inst_name);
10336 		if (!tr) {
10337 			printk(KERN_TRACE "Instance %s not found\n", inst_name);
10338 			continue;
10339 		}
10340 
10341 		if (token && (!strcmp("2", token) ||
10342 			  !strcmp("orig_cpu", token)))
10343 			ftrace_dump_one(tr, DUMP_ORIG);
10344 		else
10345 			ftrace_dump_one(tr, DUMP_ALL);
10346 	}
10347 }
10348 
10349 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
10350 {
10351 	static atomic_t dump_running;
10352 
10353 	/* Only allow one dump user at a time. */
10354 	if (atomic_inc_return(&dump_running) != 1) {
10355 		atomic_dec(&dump_running);
10356 		return;
10357 	}
10358 
10359 	switch (oops_dump_mode) {
10360 	case DUMP_ALL:
10361 		ftrace_dump_one(&global_trace, DUMP_ALL);
10362 		break;
10363 	case DUMP_ORIG:
10364 		ftrace_dump_one(&global_trace, DUMP_ORIG);
10365 		break;
10366 	case DUMP_PARAM:
10367 		ftrace_dump_by_param();
10368 		break;
10369 	case DUMP_NONE:
10370 		break;
10371 	default:
10372 		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
10373 		ftrace_dump_one(&global_trace, DUMP_ALL);
10374 	}
10375 
10376 	atomic_dec(&dump_running);
10377 }
10378 EXPORT_SYMBOL_GPL(ftrace_dump);
10379 
10380 #define WRITE_BUFSIZE  4096
10381 
10382 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
10383 				size_t count, loff_t *ppos,
10384 				int (*createfn)(const char *))
10385 {
10386 	char *kbuf, *buf, *tmp;
10387 	int ret = 0;
10388 	size_t done = 0;
10389 	size_t size;
10390 
10391 	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
10392 	if (!kbuf)
10393 		return -ENOMEM;
10394 
10395 	while (done < count) {
10396 		size = count - done;
10397 
10398 		if (size >= WRITE_BUFSIZE)
10399 			size = WRITE_BUFSIZE - 1;
10400 
10401 		if (copy_from_user(kbuf, buffer + done, size)) {
10402 			ret = -EFAULT;
10403 			goto out;
10404 		}
10405 		kbuf[size] = '\0';
10406 		buf = kbuf;
10407 		do {
10408 			tmp = strchr(buf, '\n');
10409 			if (tmp) {
10410 				*tmp = '\0';
10411 				size = tmp - buf + 1;
10412 			} else {
10413 				size = strlen(buf);
10414 				if (done + size < count) {
10415 					if (buf != kbuf)
10416 						break;
10417 					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
10418 					pr_warn("Line length is too long: Should be less than %d\n",
10419 						WRITE_BUFSIZE - 2);
10420 					ret = -EINVAL;
10421 					goto out;
10422 				}
10423 			}
10424 			done += size;
10425 
10426 			/* Remove comments */
10427 			tmp = strchr(buf, '#');
10428 
10429 			if (tmp)
10430 				*tmp = '\0';
10431 
10432 			ret = createfn(buf);
10433 			if (ret)
10434 				goto out;
10435 			buf += size;
10436 
10437 		} while (done < count);
10438 	}
10439 	ret = done;
10440 
10441 out:
10442 	kfree(kbuf);
10443 
10444 	return ret;
10445 }
10446 
10447 #ifdef CONFIG_TRACER_MAX_TRACE
10448 __init static bool tr_needs_alloc_snapshot(const char *name)
10449 {
10450 	char *test;
10451 	int len = strlen(name);
10452 	bool ret;
10453 
10454 	if (!boot_snapshot_index)
10455 		return false;
10456 
10457 	if (strncmp(name, boot_snapshot_info, len) == 0 &&
10458 	    boot_snapshot_info[len] == '\t')
10459 		return true;
10460 
10461 	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
10462 	if (!test)
10463 		return false;
10464 
10465 	sprintf(test, "\t%s\t", name);
10466 	ret = strstr(boot_snapshot_info, test) == NULL;
10467 	kfree(test);
10468 	return ret;
10469 }
10470 
10471 __init static void do_allocate_snapshot(const char *name)
10472 {
10473 	if (!tr_needs_alloc_snapshot(name))
10474 		return;
10475 
10476 	/*
10477 	 * When allocate_snapshot is set, the next call to
10478 	 * allocate_trace_buffers() (called by trace_array_get_by_name())
10479 	 * will allocate the snapshot buffer. That will alse clear
10480 	 * this flag.
10481 	 */
10482 	allocate_snapshot = true;
10483 }
10484 #else
10485 static inline void do_allocate_snapshot(const char *name) { }
10486 #endif
10487 
10488 __init static void enable_instances(void)
10489 {
10490 	struct trace_array *tr;
10491 	char *curr_str;
10492 	char *name;
10493 	char *str;
10494 	char *tok;
10495 
10496 	/* A tab is always appended */
10497 	boot_instance_info[boot_instance_index - 1] = '\0';
10498 	str = boot_instance_info;
10499 
10500 	while ((curr_str = strsep(&str, "\t"))) {
10501 		phys_addr_t start = 0;
10502 		phys_addr_t size = 0;
10503 		unsigned long addr = 0;
10504 		bool traceprintk = false;
10505 		bool traceoff = false;
10506 		char *flag_delim;
10507 		char *addr_delim;
10508 
10509 		tok = strsep(&curr_str, ",");
10510 
10511 		flag_delim = strchr(tok, '^');
10512 		addr_delim = strchr(tok, '@');
10513 
10514 		if (addr_delim)
10515 			*addr_delim++ = '\0';
10516 
10517 		if (flag_delim)
10518 			*flag_delim++ = '\0';
10519 
10520 		name = tok;
10521 
10522 		if (flag_delim) {
10523 			char *flag;
10524 
10525 			while ((flag = strsep(&flag_delim, "^"))) {
10526 				if (strcmp(flag, "traceoff") == 0) {
10527 					traceoff = true;
10528 				} else if ((strcmp(flag, "printk") == 0) ||
10529 					   (strcmp(flag, "traceprintk") == 0) ||
10530 					   (strcmp(flag, "trace_printk") == 0)) {
10531 					traceprintk = true;
10532 				} else {
10533 					pr_info("Tracing: Invalid instance flag '%s' for %s\n",
10534 						flag, name);
10535 				}
10536 			}
10537 		}
10538 
10539 		tok = addr_delim;
10540 		if (tok && isdigit(*tok)) {
10541 			start = memparse(tok, &tok);
10542 			if (!start) {
10543 				pr_warn("Tracing: Invalid boot instance address for %s\n",
10544 					name);
10545 				continue;
10546 			}
10547 			if (*tok != ':') {
10548 				pr_warn("Tracing: No size specified for instance %s\n", name);
10549 				continue;
10550 			}
10551 			tok++;
10552 			size = memparse(tok, &tok);
10553 			if (!size) {
10554 				pr_warn("Tracing: Invalid boot instance size for %s\n",
10555 					name);
10556 				continue;
10557 			}
10558 		} else if (tok) {
10559 			if (!reserve_mem_find_by_name(tok, &start, &size)) {
10560 				start = 0;
10561 				pr_warn("Failed to map boot instance %s to %s\n", name, tok);
10562 				continue;
10563 			}
10564 		}
10565 
10566 		if (start) {
10567 			addr = map_pages(start, size);
10568 			if (addr) {
10569 				pr_info("Tracing: mapped boot instance %s at physical memory %pa of size 0x%lx\n",
10570 					name, &start, (unsigned long)size);
10571 			} else {
10572 				pr_warn("Tracing: Failed to map boot instance %s\n", name);
10573 				continue;
10574 			}
10575 		} else {
10576 			/* Only non mapped buffers have snapshot buffers */
10577 			if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
10578 				do_allocate_snapshot(name);
10579 		}
10580 
10581 		tr = trace_array_create_systems(name, NULL, addr, size);
10582 		if (IS_ERR(tr)) {
10583 			pr_warn("Tracing: Failed to create instance buffer %s\n", curr_str);
10584 			continue;
10585 		}
10586 
10587 		if (traceoff)
10588 			tracer_tracing_off(tr);
10589 
10590 		if (traceprintk)
10591 			update_printk_trace(tr);
10592 
10593 		/*
10594 		 * If start is set, then this is a mapped buffer, and
10595 		 * cannot be deleted by user space, so keep the reference
10596 		 * to it.
10597 		 */
10598 		if (start)
10599 			tr->flags |= TRACE_ARRAY_FL_BOOT;
10600 		else
10601 			trace_array_put(tr);
10602 
10603 		while ((tok = strsep(&curr_str, ","))) {
10604 			early_enable_events(tr, tok, true);
10605 		}
10606 	}
10607 }
10608 
10609 __init static int tracer_alloc_buffers(void)
10610 {
10611 	int ring_buf_size;
10612 	int ret = -ENOMEM;
10613 
10614 
10615 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
10616 		pr_warn("Tracing disabled due to lockdown\n");
10617 		return -EPERM;
10618 	}
10619 
10620 	/*
10621 	 * Make sure we don't accidentally add more trace options
10622 	 * than we have bits for.
10623 	 */
10624 	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
10625 
10626 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
10627 		goto out;
10628 
10629 	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
10630 		goto out_free_buffer_mask;
10631 
10632 	/* Only allocate trace_printk buffers if a trace_printk exists */
10633 	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
10634 		/* Must be called before global_trace.buffer is allocated */
10635 		trace_printk_init_buffers();
10636 
10637 	/* To save memory, keep the ring buffer size to its minimum */
10638 	if (global_trace.ring_buffer_expanded)
10639 		ring_buf_size = trace_buf_size;
10640 	else
10641 		ring_buf_size = 1;
10642 
10643 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
10644 	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
10645 
10646 	raw_spin_lock_init(&global_trace.start_lock);
10647 
10648 	/*
10649 	 * The prepare callbacks allocates some memory for the ring buffer. We
10650 	 * don't free the buffer if the CPU goes down. If we were to free
10651 	 * the buffer, then the user would lose any trace that was in the
10652 	 * buffer. The memory will be removed once the "instance" is removed.
10653 	 */
10654 	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
10655 				      "trace/RB:prepare", trace_rb_cpu_prepare,
10656 				      NULL);
10657 	if (ret < 0)
10658 		goto out_free_cpumask;
10659 	/* Used for event triggers */
10660 	ret = -ENOMEM;
10661 	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
10662 	if (!temp_buffer)
10663 		goto out_rm_hp_state;
10664 
10665 	if (trace_create_savedcmd() < 0)
10666 		goto out_free_temp_buffer;
10667 
10668 	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
10669 		goto out_free_savedcmd;
10670 
10671 	/* TODO: make the number of buffers hot pluggable with CPUS */
10672 	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
10673 		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
10674 		goto out_free_pipe_cpumask;
10675 	}
10676 	if (global_trace.buffer_disabled)
10677 		tracing_off();
10678 
10679 	if (trace_boot_clock) {
10680 		ret = tracing_set_clock(&global_trace, trace_boot_clock);
10681 		if (ret < 0)
10682 			pr_warn("Trace clock %s not defined, going back to default\n",
10683 				trace_boot_clock);
10684 	}
10685 
10686 	/*
10687 	 * register_tracer() might reference current_trace, so it
10688 	 * needs to be set before we register anything. This is
10689 	 * just a bootstrap of current_trace anyway.
10690 	 */
10691 	global_trace.current_trace = &nop_trace;
10692 
10693 	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
10694 #ifdef CONFIG_TRACER_MAX_TRACE
10695 	spin_lock_init(&global_trace.snapshot_trigger_lock);
10696 #endif
10697 	ftrace_init_global_array_ops(&global_trace);
10698 
10699 	init_trace_flags_index(&global_trace);
10700 
10701 	register_tracer(&nop_trace);
10702 
10703 	/* Function tracing may start here (via kernel command line) */
10704 	init_function_trace();
10705 
10706 	/* All seems OK, enable tracing */
10707 	tracing_disabled = 0;
10708 
10709 	atomic_notifier_chain_register(&panic_notifier_list,
10710 				       &trace_panic_notifier);
10711 
10712 	register_die_notifier(&trace_die_notifier);
10713 
10714 	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
10715 
10716 	INIT_LIST_HEAD(&global_trace.systems);
10717 	INIT_LIST_HEAD(&global_trace.events);
10718 	INIT_LIST_HEAD(&global_trace.hist_vars);
10719 	INIT_LIST_HEAD(&global_trace.err_log);
10720 	list_add(&global_trace.list, &ftrace_trace_arrays);
10721 
10722 	apply_trace_boot_options();
10723 
10724 	register_snapshot_cmd();
10725 
10726 	test_can_verify();
10727 
10728 	return 0;
10729 
10730 out_free_pipe_cpumask:
10731 	free_cpumask_var(global_trace.pipe_cpumask);
10732 out_free_savedcmd:
10733 	trace_free_saved_cmdlines_buffer();
10734 out_free_temp_buffer:
10735 	ring_buffer_free(temp_buffer);
10736 out_rm_hp_state:
10737 	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
10738 out_free_cpumask:
10739 	free_cpumask_var(global_trace.tracing_cpumask);
10740 out_free_buffer_mask:
10741 	free_cpumask_var(tracing_buffer_mask);
10742 out:
10743 	return ret;
10744 }
10745 
10746 void __init ftrace_boot_snapshot(void)
10747 {
10748 #ifdef CONFIG_TRACER_MAX_TRACE
10749 	struct trace_array *tr;
10750 
10751 	if (!snapshot_at_boot)
10752 		return;
10753 
10754 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
10755 		if (!tr->allocated_snapshot)
10756 			continue;
10757 
10758 		tracing_snapshot_instance(tr);
10759 		trace_array_puts(tr, "** Boot snapshot taken **\n");
10760 	}
10761 #endif
10762 }
10763 
10764 void __init early_trace_init(void)
10765 {
10766 	if (tracepoint_printk) {
10767 		tracepoint_print_iter =
10768 			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
10769 		if (MEM_FAIL(!tracepoint_print_iter,
10770 			     "Failed to allocate trace iterator\n"))
10771 			tracepoint_printk = 0;
10772 		else
10773 			static_key_enable(&tracepoint_printk_key.key);
10774 	}
10775 	tracer_alloc_buffers();
10776 
10777 	init_events();
10778 }
10779 
10780 void __init trace_init(void)
10781 {
10782 	trace_event_init();
10783 
10784 	if (boot_instance_index)
10785 		enable_instances();
10786 }
10787 
10788 __init static void clear_boot_tracer(void)
10789 {
10790 	/*
10791 	 * The default tracer at boot buffer is an init section.
10792 	 * This function is called in lateinit. If we did not
10793 	 * find the boot tracer, then clear it out, to prevent
10794 	 * later registration from accessing the buffer that is
10795 	 * about to be freed.
10796 	 */
10797 	if (!default_bootup_tracer)
10798 		return;
10799 
10800 	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
10801 	       default_bootup_tracer);
10802 	default_bootup_tracer = NULL;
10803 }
10804 
10805 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
10806 __init static void tracing_set_default_clock(void)
10807 {
10808 	/* sched_clock_stable() is determined in late_initcall */
10809 	if (!trace_boot_clock && !sched_clock_stable()) {
10810 		if (security_locked_down(LOCKDOWN_TRACEFS)) {
10811 			pr_warn("Can not set tracing clock due to lockdown\n");
10812 			return;
10813 		}
10814 
10815 		printk(KERN_WARNING
10816 		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
10817 		       "If you want to keep using the local clock, then add:\n"
10818 		       "  \"trace_clock=local\"\n"
10819 		       "on the kernel command line\n");
10820 		tracing_set_clock(&global_trace, "global");
10821 	}
10822 }
10823 #else
10824 static inline void tracing_set_default_clock(void) { }
10825 #endif
10826 
10827 __init static int late_trace_init(void)
10828 {
10829 	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
10830 		static_key_disable(&tracepoint_printk_key.key);
10831 		tracepoint_printk = 0;
10832 	}
10833 
10834 	tracing_set_default_clock();
10835 	clear_boot_tracer();
10836 	return 0;
10837 }
10838 
10839 late_initcall_sync(late_trace_init);
10840