xref: /linux/kernel/trace/trace.c (revision 80c3e28528ff9f269937fcfe73895213a2e14905)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ring buffer based function tracer
4  *
5  * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7  *
8  * Originally taken from the RT patch by:
9  *    Arnaldo Carvalho de Melo <acme@redhat.com>
10  *
11  * Based on code from the latency_tracer, that is:
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 #include <linux/ring_buffer.h>
16 #include <linux/utsname.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/cleanup.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/panic_notifier.h>
43 #include <linux/poll.h>
44 #include <linux/nmi.h>
45 #include <linux/fs.h>
46 #include <linux/trace.h>
47 #include <linux/sched/clock.h>
48 #include <linux/sched/rt.h>
49 #include <linux/fsnotify.h>
50 #include <linux/irq_work.h>
51 #include <linux/workqueue.h>
52 
53 #include <asm/setup.h> /* COMMAND_LINE_SIZE */
54 
55 #include "trace.h"
56 #include "trace_output.h"
57 
58 #ifdef CONFIG_FTRACE_STARTUP_TEST
59 /*
60  * We need to change this state when a selftest is running.
61  * A selftest will look into the ring buffer to count the
62  * entries inserted during the selftest, although some concurrent
63  * insertions into the ring buffer, such as trace_printk(), could occur
64  * at the same time, giving false positive or negative results.
65  */
66 static bool __read_mostly tracing_selftest_running;
67 
68 /*
69  * If boot-time tracing including tracers/events via kernel cmdline
70  * is running, we do not want to run SELFTEST.
71  */
72 bool __read_mostly tracing_selftest_disabled;
73 
74 void __init disable_tracing_selftest(const char *reason)
75 {
76 	if (!tracing_selftest_disabled) {
77 		tracing_selftest_disabled = true;
78 		pr_info("Ftrace startup test is disabled due to %s\n", reason);
79 	}
80 }
81 #else
82 #define tracing_selftest_running	0
83 #define tracing_selftest_disabled	0
84 #endif
85 
86 /* Pipe tracepoints to printk */
87 static struct trace_iterator *tracepoint_print_iter;
88 int tracepoint_printk;
89 static bool tracepoint_printk_stop_on_boot __initdata;
90 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
91 
92 /* For tracers that don't implement custom flags */
93 static struct tracer_opt dummy_tracer_opt[] = {
94 	{ }
95 };
96 
97 static int
98 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
99 {
100 	return 0;
101 }
102 
103 /*
104  * To prevent the comm cache from being overwritten when no
105  * tracing is active, only save the comm when a trace event
106  * occurs.
107  */
108 DEFINE_PER_CPU(bool, trace_taskinfo_save);
109 
110 /*
111  * Kill all tracing for good (never come back).
112  * It is initialized to 1 but is set to zero when the initialization
113  * of the tracer succeeds. That is the only place that sets
114  * it back to zero.
115  */
116 static int tracing_disabled = 1;
117 
118 cpumask_var_t __read_mostly	tracing_buffer_mask;
119 
120 /*
121  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
122  *
123  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
124  * is set, then ftrace_dump is called. This will output the contents
125  * of the ftrace buffers to the console.  This is very useful for
126  * capturing traces that lead to crashes and outputting them to a
127  * serial console.
128  *
129  * It is off by default, but you can enable it either by specifying
130  * "ftrace_dump_on_oops" on the kernel command line, or by setting
131  * /proc/sys/kernel/ftrace_dump_on_oops.
132  * Set 1 if you want to dump the buffers of all CPUs.
133  * Set 2 if you want to dump the buffer of the CPU that triggered the oops.
134  * Set an instance name if you want to dump a specific trace instance.
135  * Multiple instance dump is also supported; instances are separated
136  * by commas.
137  */
138 /* Set to the string "0" to keep it disabled by default */
139 char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";
140 
141 /* When set, tracing will stop when a WARN*() is hit */
142 int __disable_trace_on_warning;
143 
144 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
145 /* Map of enums to their values, for "eval_map" file */
146 struct trace_eval_map_head {
147 	struct module			*mod;
148 	unsigned long			length;
149 };
150 
151 union trace_eval_map_item;
152 
153 struct trace_eval_map_tail {
154 	/*
155 	 * "end" is first and points to NULL as it must be different
156 	 * from "mod" or "eval_string"
157 	 */
158 	union trace_eval_map_item	*next;
159 	const char			*end;	/* points to NULL */
160 };
161 
162 static DEFINE_MUTEX(trace_eval_mutex);
163 
164 /*
165  * The trace_eval_maps are saved in an array with two extra elements,
166  * one at the beginning, and one at the end. The beginning item contains
167  * the count of the saved maps (head.length), and the module they
168  * belong to if not built in (head.mod). The ending item contains a
169  * pointer to the next array of saved eval_map items.
170  */
171 union trace_eval_map_item {
172 	struct trace_eval_map		map;
173 	struct trace_eval_map_head	head;
174 	struct trace_eval_map_tail	tail;
175 };
176 
177 static union trace_eval_map_item *trace_eval_maps;
178 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
179 
180 int tracing_set_tracer(struct trace_array *tr, const char *buf);
181 static void ftrace_trace_userstack(struct trace_array *tr,
182 				   struct trace_buffer *buffer,
183 				   unsigned int trace_ctx);
184 
185 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
186 static char *default_bootup_tracer;
187 
188 static bool allocate_snapshot;
189 static bool snapshot_at_boot;
190 
191 static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
192 static int boot_instance_index;
193 
194 static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
195 static int boot_snapshot_index;
196 
197 static int __init set_cmdline_ftrace(char *str)
198 {
199 	strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
200 	default_bootup_tracer = bootup_tracer_buf;
201 	/* We are using ftrace early, expand it */
202 	trace_set_ring_buffer_expanded(NULL);
203 	return 1;
204 }
205 __setup("ftrace=", set_cmdline_ftrace);
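
/*
 * Example (illustrative): booting with "ftrace=function" selects the
 * function tracer before tracefs is available and, as above, expands
 * the ring buffer early via trace_set_ring_buffer_expanded(NULL).
 */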
206 
207 int ftrace_dump_on_oops_enabled(void)
208 {
209 	if (!strcmp("0", ftrace_dump_on_oops))
210 		return 0;
211 	else
212 		return 1;
213 }
214 
215 static int __init set_ftrace_dump_on_oops(char *str)
216 {
217 	if (!*str) {
218 		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
219 		return 1;
220 	}
221 
222 	if (*str == ',') {
223 		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
224 		strscpy(ftrace_dump_on_oops + 1, str, MAX_TRACER_SIZE - 1);
225 		return 1;
226 	}
227 
228 	if (*str++ == '=') {
229 		strscpy(ftrace_dump_on_oops, str, MAX_TRACER_SIZE);
230 		return 1;
231 	}
232 
233 	return 0;
234 }
235 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
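
/*
 * Example (illustrative) command-line forms, per the parsing above:
 *
 *	ftrace_dump_on_oops          -> "1" (dump the buffers of all CPUs)
 *	ftrace_dump_on_oops=2        -> "2" (dump only the oopsing CPU)
 *	ftrace_dump_on_oops=foo,bar  -> dump the "foo" and "bar" instances
 *	ftrace_dump_on_oops,foo      -> "1,foo" (all CPUs plus instance "foo")
 */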
236 
237 static int __init stop_trace_on_warning(char *str)
238 {
239 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
240 		__disable_trace_on_warning = 1;
241 	return 1;
242 }
243 __setup("traceoff_on_warning", stop_trace_on_warning);
244 
245 static int __init boot_alloc_snapshot(char *str)
246 {
247 	char *slot = boot_snapshot_info + boot_snapshot_index;
248 	int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
249 	int ret;
250 
251 	if (str[0] == '=') {
252 		str++;
253 		if (strlen(str) >= left)
254 			return -1;
255 
256 		ret = snprintf(slot, left, "%s\t", str);
257 		boot_snapshot_index += ret;
258 	} else {
259 		allocate_snapshot = true;
260 		/* We also need the main ring buffer expanded */
261 		trace_set_ring_buffer_expanded(NULL);
262 	}
263 	return 1;
264 }
265 __setup("alloc_snapshot", boot_alloc_snapshot);
266 
267 
268 static int __init boot_snapshot(char *str)
269 {
270 	snapshot_at_boot = true;
271 	boot_alloc_snapshot(str);
272 	return 1;
273 }
274 __setup("ftrace_boot_snapshot", boot_snapshot);
275 
276 
277 static int __init boot_instance(char *str)
278 {
279 	char *slot = boot_instance_info + boot_instance_index;
280 	int left = sizeof(boot_instance_info) - boot_instance_index;
281 	int ret;
282 
283 	if (strlen(str) >= left)
284 		return -1;
285 
286 	ret = snprintf(slot, left, "%s\t", str);
287 	boot_instance_index += ret;
288 
289 	return 1;
290 }
291 __setup("trace_instance=", boot_instance);
292 
293 
294 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
295 
296 static int __init set_trace_boot_options(char *str)
297 {
298 	strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
299 	return 1;
300 }
301 __setup("trace_options=", set_trace_boot_options);
302 
303 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
304 static char *trace_boot_clock __initdata;
305 
306 static int __init set_trace_boot_clock(char *str)
307 {
308 	strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
309 	trace_boot_clock = trace_boot_clock_buf;
310 	return 1;
311 }
312 __setup("trace_clock=", set_trace_boot_clock);
313 
314 static int __init set_tracepoint_printk(char *str)
315 {
316 	/* Ignore the "tp_printk_stop_on_boot" param */
317 	if (*str == '_')
318 		return 0;
319 
320 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
321 		tracepoint_printk = 1;
322 	return 1;
323 }
324 __setup("tp_printk", set_tracepoint_printk);
325 
326 static int __init set_tracepoint_printk_stop(char *str)
327 {
328 	tracepoint_printk_stop_on_boot = true;
329 	return 1;
330 }
331 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
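
/*
 * Example (illustrative): boot with "tp_printk" to mirror tracepoints
 * to printk, optionally adding "tp_printk_stop_on_boot" to silence the
 * mirroring once the system has finished booting.
 */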
332 
333 unsigned long long ns2usecs(u64 nsec)
334 {
335 	nsec += 500;
336 	do_div(nsec, 1000);
337 	return nsec;
338 }
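
/*
 * Example: ns2usecs(1499) == 1 and ns2usecs(1500) == 2; adding 500 ns
 * before dividing rounds to the nearest microsecond.
 */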
339 
340 static void
341 trace_process_export(struct trace_export *export,
342 	       struct ring_buffer_event *event, int flag)
343 {
344 	struct trace_entry *entry;
345 	unsigned int size = 0;
346 
347 	if (export->flags & flag) {
348 		entry = ring_buffer_event_data(event);
349 		size = ring_buffer_event_length(event);
350 		export->write(export, entry, size);
351 	}
352 }
353 
354 static DEFINE_MUTEX(ftrace_export_lock);
355 
356 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
357 
358 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
359 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
360 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
361 
362 static inline void ftrace_exports_enable(struct trace_export *export)
363 {
364 	if (export->flags & TRACE_EXPORT_FUNCTION)
365 		static_branch_inc(&trace_function_exports_enabled);
366 
367 	if (export->flags & TRACE_EXPORT_EVENT)
368 		static_branch_inc(&trace_event_exports_enabled);
369 
370 	if (export->flags & TRACE_EXPORT_MARKER)
371 		static_branch_inc(&trace_marker_exports_enabled);
372 }
373 
374 static inline void ftrace_exports_disable(struct trace_export *export)
375 {
376 	if (export->flags & TRACE_EXPORT_FUNCTION)
377 		static_branch_dec(&trace_function_exports_enabled);
378 
379 	if (export->flags & TRACE_EXPORT_EVENT)
380 		static_branch_dec(&trace_event_exports_enabled);
381 
382 	if (export->flags & TRACE_EXPORT_MARKER)
383 		static_branch_dec(&trace_marker_exports_enabled);
384 }
385 
386 static void ftrace_exports(struct ring_buffer_event *event, int flag)
387 {
388 	struct trace_export *export;
389 
390 	preempt_disable_notrace();
391 
392 	export = rcu_dereference_raw_check(ftrace_exports_list);
393 	while (export) {
394 		trace_process_export(export, event, flag);
395 		export = rcu_dereference_raw_check(export->next);
396 	}
397 
398 	preempt_enable_notrace();
399 }
400 
401 static inline void
402 add_trace_export(struct trace_export **list, struct trace_export *export)
403 {
404 	rcu_assign_pointer(export->next, *list);
405 	/*
406 	 * We are inserting export into the list, but another
407 	 * CPU might be walking that list. We need to make sure
408 	 * the export->next pointer is valid before another CPU sees
409 	 * the export pointer included in the list.
410 	 */
411 	rcu_assign_pointer(*list, export);
412 }
413 
414 static inline int
415 rm_trace_export(struct trace_export **list, struct trace_export *export)
416 {
417 	struct trace_export **p;
418 
419 	for (p = list; *p != NULL; p = &(*p)->next)
420 		if (*p == export)
421 			break;
422 
423 	if (*p != export)
424 		return -1;
425 
426 	rcu_assign_pointer(*p, (*p)->next);
427 
428 	return 0;
429 }
430 
431 static inline void
432 add_ftrace_export(struct trace_export **list, struct trace_export *export)
433 {
434 	ftrace_exports_enable(export);
435 
436 	add_trace_export(list, export);
437 }
438 
439 static inline int
440 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
441 {
442 	int ret;
443 
444 	ret = rm_trace_export(list, export);
445 	ftrace_exports_disable(export);
446 
447 	return ret;
448 }
449 
450 int register_ftrace_export(struct trace_export *export)
451 {
452 	if (WARN_ON_ONCE(!export->write))
453 		return -1;
454 
455 	mutex_lock(&ftrace_export_lock);
456 
457 	add_ftrace_export(&ftrace_exports_list, export);
458 
459 	mutex_unlock(&ftrace_export_lock);
460 
461 	return 0;
462 }
463 EXPORT_SYMBOL_GPL(register_ftrace_export);
464 
465 int unregister_ftrace_export(struct trace_export *export)
466 {
467 	int ret;
468 
469 	mutex_lock(&ftrace_export_lock);
470 
471 	ret = rm_ftrace_export(&ftrace_exports_list, export);
472 
473 	mutex_unlock(&ftrace_export_lock);
474 
475 	return ret;
476 }
477 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
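
/*
 * Example (hypothetical module code, a minimal sketch): export function
 * trace entries to a custom sink. The my_write()/my_export names are
 * illustrative only.
 *
 *	static void my_write(struct trace_export *export,
 *			     const void *entry, unsigned int size)
 *	{
 *		pr_debug("exported %u bytes\n", size);
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_write,
 *		.flags	= TRACE_EXPORT_FUNCTION,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */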
478 
479 /* trace_flags holds trace_options default values */
480 #define TRACE_DEFAULT_FLAGS						\
481 	(FUNCTION_DEFAULT_FLAGS |					\
482 	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
483 	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
484 	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
485 	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
486 	 TRACE_ITER_HASH_PTR | TRACE_ITER_TRACE_PRINTK)
487 
488 /* trace_options that are only supported by global_trace */
489 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
490 	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
491 
492 /* trace_flags that are default zero for instances */
493 #define ZEROED_TRACE_FLAGS \
494 	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK | TRACE_ITER_TRACE_PRINTK)
495 
496 /*
497  * The global_trace is the descriptor that holds the top-level tracing
498  * buffers for the live tracing.
499  */
500 static struct trace_array global_trace = {
501 	.trace_flags = TRACE_DEFAULT_FLAGS,
502 };
503 
504 static struct trace_array *printk_trace = &global_trace;
505 
506 static __always_inline bool printk_binsafe(struct trace_array *tr)
507 {
508 	/*
509 	 * The binary format of trace_printk can cause a crash if used
510 	 * by a buffer from another boot. Force the use of the
511 	 * non-binary version of trace_printk if the trace_printk
512 	 * buffer is a boot-mapped ring buffer.
513 	 */
514 	return !(tr->flags & TRACE_ARRAY_FL_BOOT);
515 }
516 
517 static void update_printk_trace(struct trace_array *tr)
518 {
519 	if (printk_trace == tr)
520 		return;
521 
522 	printk_trace->trace_flags &= ~TRACE_ITER_TRACE_PRINTK;
523 	printk_trace = tr;
524 	tr->trace_flags |= TRACE_ITER_TRACE_PRINTK;
525 }
526 
527 void trace_set_ring_buffer_expanded(struct trace_array *tr)
528 {
529 	if (!tr)
530 		tr = &global_trace;
531 	tr->ring_buffer_expanded = true;
532 }
533 
534 LIST_HEAD(ftrace_trace_arrays);
535 
536 int trace_array_get(struct trace_array *this_tr)
537 {
538 	struct trace_array *tr;
539 
540 	guard(mutex)(&trace_types_lock);
541 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
542 		if (tr == this_tr) {
543 			tr->ref++;
544 			return 0;
545 		}
546 	}
547 
548 	return -ENODEV;
549 }
550 
551 static void __trace_array_put(struct trace_array *this_tr)
552 {
553 	WARN_ON(!this_tr->ref);
554 	this_tr->ref--;
555 }
556 
557 /**
558  * trace_array_put - Decrement the reference counter for this trace array.
559  * @this_tr: pointer to the trace array
560  *
561  * NOTE: Use this when we no longer need the trace array returned by
562  * trace_array_get_by_name(). This ensures the trace array can be later
563  * destroyed.
564  *
565  */
566 void trace_array_put(struct trace_array *this_tr)
567 {
568 	if (!this_tr)
569 		return;
570 
571 	mutex_lock(&trace_types_lock);
572 	__trace_array_put(this_tr);
573 	mutex_unlock(&trace_types_lock);
574 }
575 EXPORT_SYMBOL_GPL(trace_array_put);
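
/*
 * Example (hypothetical caller, arguments elided): every successful
 * lookup must be balanced with a trace_array_put() so the instance
 * can later be destroyed:
 *
 *	struct trace_array *tr = trace_array_get_by_name(...);
 *
 *	if (tr) {
 *		trace_array_printk(tr, _THIS_IP_, "hello\n");
 *		trace_array_put(tr);
 *	}
 */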
576 
577 int tracing_check_open_get_tr(struct trace_array *tr)
578 {
579 	int ret;
580 
581 	ret = security_locked_down(LOCKDOWN_TRACEFS);
582 	if (ret)
583 		return ret;
584 
585 	if (tracing_disabled)
586 		return -ENODEV;
587 
588 	if (tr && trace_array_get(tr) < 0)
589 		return -ENODEV;
590 
591 	return 0;
592 }
593 
594 /**
595  * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
596  * @filtered_pids: The list of pids to check
597  * @search_pid: The PID to find in @filtered_pids
598  *
599  * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
600  */
601 bool
602 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
603 {
604 	return trace_pid_list_is_set(filtered_pids, search_pid);
605 }
606 
607 /**
608  * trace_ignore_this_task - should a task be ignored for tracing
609  * @filtered_pids: The list of pids to check
610  * @filtered_no_pids: The list of pids not to be traced
611  * @task: The task that should be ignored if not filtered
612  *
613  * Checks if @task should be traced or not from @filtered_pids.
614  * Returns true if @task should *NOT* be traced.
615  * Returns false if @task should be traced.
616  */
617 bool
618 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
619 		       struct trace_pid_list *filtered_no_pids,
620 		       struct task_struct *task)
621 {
622 	/*
623 	 * If filtered_no_pids is not empty, and the task's pid is listed
624 	 * in filtered_no_pids, then return true.
625 	 * Otherwise, if filtered_pids is empty, that means we can
626 	 * trace all tasks. If it has content, then only trace pids
627 	 * within filtered_pids.
628 	 */
629 
630 	return (filtered_pids &&
631 		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
632 		(filtered_no_pids &&
633 		 trace_find_filtered_pid(filtered_no_pids, task->pid));
634 }
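
/*
 * Example: with filtered_pids = {42} and filtered_no_pids = {99}, a
 * task with pid 42 is traced, pid 99 is ignored (it is listed in
 * filtered_no_pids), and pid 7 is ignored (it is not in filtered_pids).
 */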
635 
636 /**
637  * trace_filter_add_remove_task - Add or remove a task from a pid_list
638  * @pid_list: The list to modify
639  * @self: The current task for fork or NULL for exit
640  * @task: The task to add or remove
641  *
642  * If adding a task, if @self is defined, the task is only added if @self
643  * is also included in @pid_list. This happens on fork and tasks should
644  * only be added when the parent is listed. If @self is NULL, then the
645  * @task pid will be removed from the list, which would happen on exit
646  * of a task.
647  */
648 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
649 				  struct task_struct *self,
650 				  struct task_struct *task)
651 {
652 	if (!pid_list)
653 		return;
654 
655 	/* For forks, we only add if the forking task is listed */
656 	if (self) {
657 		if (!trace_find_filtered_pid(pid_list, self->pid))
658 			return;
659 	}
660 
661 	/* "self" is set for forks, and NULL for exits */
662 	if (self)
663 		trace_pid_list_set(pid_list, task->pid);
664 	else
665 		trace_pid_list_clear(pid_list, task->pid);
666 }
667 
668 /**
669  * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
670  * @pid_list: The pid list to show
671  * @v: The last pid that was shown (the actual pid plus 1, so zero can be displayed)
672  * @pos: The position of the file
673  *
674  * This is used by the seq_file "next" operation to iterate the pids
675  * listed in a trace_pid_list structure.
676  *
677  * Returns the pid+1 as we want to display pid of zero, but NULL would
678  * stop the iteration.
679  */
680 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
681 {
682 	long pid = (unsigned long)v;
683 	unsigned int next;
684 
685 	(*pos)++;
686 
687 	/* pid already is +1 of the actual previous bit */
688 	if (trace_pid_list_next(pid_list, pid, &next) < 0)
689 		return NULL;
690 
691 	pid = next;
692 
693 	/* Return pid + 1 to allow zero to be represented */
694 	return (void *)(pid + 1);
695 }
696 
697 /**
698  * trace_pid_start - Used for seq_file to start reading pid lists
699  * @pid_list: The pid list to show
700  * @pos: The position of the file
701  *
702  * This is used by seq_file "start" operation to start the iteration
703  * of listing pids.
704  *
705  * Returns the pid+1 as we want to display pid of zero, but NULL would
706  * stop the iteration.
707  */
708 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
709 {
710 	unsigned long pid;
711 	unsigned int first;
712 	loff_t l = 0;
713 
714 	if (trace_pid_list_first(pid_list, &first) < 0)
715 		return NULL;
716 
717 	pid = first;
718 
719 	/* Return pid + 1 so that zero can be the exit value */
720 	for (pid++; pid && l < *pos;
721 	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
722 		;
723 	return (void *)pid;
724 }
725 
726 /**
727  * trace_pid_show - show the current pid in seq_file processing
728  * @m: The seq_file structure to write into
729  * @v: A void pointer of the pid (+1) value to display
730  *
731  * Can be directly used by seq_file operations to display the current
732  * pid value.
733  */
734 int trace_pid_show(struct seq_file *m, void *v)
735 {
736 	unsigned long pid = (unsigned long)v - 1;
737 
738 	seq_printf(m, "%lu\n", pid);
739 	return 0;
740 }
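
/*
 * Example (hypothetical, a minimal sketch): the helpers above slot
 * straight into a seq_operations table, with the pid_list kept in the
 * seq_file private data:
 *
 *	static void *p_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(m->private, pos);
 *	}
 *
 *	static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(m->private, v, pos);
 *	}
 *
 *	static void p_stop(struct seq_file *m, void *v)
 *	{
 *	}
 *
 *	static const struct seq_operations pid_sops = {
 *		.start	= p_start,
 *		.next	= p_next,
 *		.stop	= p_stop,
 *		.show	= trace_pid_show,
 *	};
 */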
741 
742 /* 128 (PID_BUF_SIZE + 1, including the terminating NUL) should be much more than enough */
743 #define PID_BUF_SIZE		127
744 
745 int trace_pid_write(struct trace_pid_list *filtered_pids,
746 		    struct trace_pid_list **new_pid_list,
747 		    const char __user *ubuf, size_t cnt)
748 {
749 	struct trace_pid_list *pid_list;
750 	struct trace_parser parser;
751 	unsigned long val;
752 	int nr_pids = 0;
753 	ssize_t read = 0;
754 	ssize_t ret;
755 	loff_t pos;
756 	pid_t pid;
757 
758 	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
759 		return -ENOMEM;
760 
761 	/*
762 	 * Always create a new array. The write is an all-or-nothing
763 	 * operation: a new array is built whenever the user adds new
764 	 * pids, and if the operation fails, the current list is
765 	 * not modified.
766 	 */
767 	pid_list = trace_pid_list_alloc();
768 	if (!pid_list) {
769 		trace_parser_put(&parser);
770 		return -ENOMEM;
771 	}
772 
773 	if (filtered_pids) {
774 		/* copy the current bits to the new max */
775 		ret = trace_pid_list_first(filtered_pids, &pid);
776 		while (!ret) {
777 			trace_pid_list_set(pid_list, pid);
778 			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
779 			nr_pids++;
780 		}
781 	}
782 
783 	ret = 0;
784 	while (cnt > 0) {
785 
786 		pos = 0;
787 
788 		ret = trace_get_user(&parser, ubuf, cnt, &pos);
789 		if (ret < 0)
790 			break;
791 
792 		read += ret;
793 		ubuf += ret;
794 		cnt -= ret;
795 
796 		if (!trace_parser_loaded(&parser))
797 			break;
798 
799 		ret = -EINVAL;
800 		if (kstrtoul(parser.buffer, 0, &val))
801 			break;
802 
803 		pid = (pid_t)val;
804 
805 		if (trace_pid_list_set(pid_list, pid) < 0) {
806 			ret = -1;
807 			break;
808 		}
809 		nr_pids++;
810 
811 		trace_parser_clear(&parser);
812 		ret = 0;
813 	}
814 	trace_parser_put(&parser);
815 
816 	if (ret < 0) {
817 		trace_pid_list_free(pid_list);
818 		return ret;
819 	}
820 
821 	if (!nr_pids) {
822 		/* Cleared the list of pids */
823 		trace_pid_list_free(pid_list);
824 		pid_list = NULL;
825 	}
826 
827 	*new_pid_list = pid_list;
828 
829 	return read;
830 }
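
/*
 * Example: a write such as "echo 123 456 > set_event_pid" arrives here
 * as @ubuf; each whitespace-separated token is parsed into a pid and
 * set in the new list, while an empty write clears the list entirely.
 */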
831 
832 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
833 {
834 	u64 ts;
835 
836 	/* Early boot up does not have a buffer yet */
837 	if (!buf->buffer)
838 		return trace_clock_local();
839 
840 	ts = ring_buffer_time_stamp(buf->buffer);
841 	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
842 
843 	return ts;
844 }
845 
846 u64 ftrace_now(int cpu)
847 {
848 	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
849 }
850 
851 /**
852  * tracing_is_enabled - Show if global_trace has been enabled
853  *
854  * Shows if the global trace has been enabled or not. It uses the
855  * mirror flag "buffer_disabled", which is used in fast paths such as
856  * the irqsoff tracer. But it may be inaccurate due to races. If you
857  * need to know the accurate state, use tracing_is_on() which is a little
858  * slower, but accurate.
859  */
860 int tracing_is_enabled(void)
861 {
862 	/*
863 	 * For quick access (irqsoff uses this in fast path), just
864 	 * return the mirror variable of the state of the ring buffer.
865 	 * It's a little racy, but we don't really care.
866 	 */
867 	smp_rmb();
868 	return !global_trace.buffer_disabled;
869 }
870 
871 /*
872  * trace_buf_size is the size in bytes that is allocated
873  * for a buffer. Note, the number of bytes is always rounded
874  * to page size.
875  *
876  * This number is purposely set to a low 16384 entries.
877  * If a dump on oops happens, it is much appreciated
878  * not to have to wait for all that output. This is
879  * configurable at both boot time and run time anyway.
880  */
881 #define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
882 
883 static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
884 
885 /* trace_types holds a link list of available tracers. */
886 static struct tracer		*trace_types __read_mostly;
887 
888 /*
889  * trace_types_lock is used to protect the trace_types list.
890  */
891 DEFINE_MUTEX(trace_types_lock);
892 
893 /*
894  * Serialize access to the ring buffer.
895  *
896  * The ring buffer serializes readers, but that is only low-level protection.
897  * The validity of the events (returned by ring_buffer_peek(), etc.)
898  * is not protected by the ring buffer.
899  *
900  * The content of events may become garbage if we allow another process to
901  * consume these events concurrently:
902  *   A) the page of the consumed events may become a normal page
903  *      (not a reader page) in the ring buffer, and this page will be
904  *      rewritten by the events producer.
905  *   B) the page of the consumed events may become a page for splice_read,
906  *      and this page will be returned to the system.
907  *
908  * These primitives allow multiple processes to access different CPU
909  * ring buffers concurrently.
910  *
911  * These primitives don't distinguish read-only and read-consume access.
912  * Multiple read-only accesses are also serialized.
913  */
914 
915 #ifdef CONFIG_SMP
916 static DECLARE_RWSEM(all_cpu_access_lock);
917 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
918 
919 static inline void trace_access_lock(int cpu)
920 {
921 	if (cpu == RING_BUFFER_ALL_CPUS) {
922 		/* gain it for accessing the whole ring buffer. */
923 		down_write(&all_cpu_access_lock);
924 	} else {
925 		/* gain it for accessing a cpu ring buffer. */
926 
927 		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
928 		down_read(&all_cpu_access_lock);
929 
930 		/* Secondly block other access to this @cpu ring buffer. */
931 		mutex_lock(&per_cpu(cpu_access_lock, cpu));
932 	}
933 }
934 
935 static inline void trace_access_unlock(int cpu)
936 {
937 	if (cpu == RING_BUFFER_ALL_CPUS) {
938 		up_write(&all_cpu_access_lock);
939 	} else {
940 		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
941 		up_read(&all_cpu_access_lock);
942 	}
943 }
944 
945 static inline void trace_access_lock_init(void)
946 {
947 	int cpu;
948 
949 	for_each_possible_cpu(cpu)
950 		mutex_init(&per_cpu(cpu_access_lock, cpu));
951 }
952 
953 #else
954 
955 static DEFINE_MUTEX(access_lock);
956 
957 static inline void trace_access_lock(int cpu)
958 {
959 	(void)cpu;
960 	mutex_lock(&access_lock);
961 }
962 
963 static inline void trace_access_unlock(int cpu)
964 {
965 	(void)cpu;
966 	mutex_unlock(&access_lock);
967 }
968 
969 static inline void trace_access_lock_init(void)
970 {
971 }
972 
973 #endif
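
/*
 * Typical usage (a sketch): readers of a single per-CPU buffer take
 *
 *	trace_access_lock(cpu);
 *	... consume events from that CPU's buffer ...
 *	trace_access_unlock(cpu);
 *
 * while whole-buffer readers pass RING_BUFFER_ALL_CPUS, which on SMP
 * takes all_cpu_access_lock for writing and excludes every per-CPU
 * reader.
 */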
974 
975 #ifdef CONFIG_STACKTRACE
976 static void __ftrace_trace_stack(struct trace_array *tr,
977 				 struct trace_buffer *buffer,
978 				 unsigned int trace_ctx,
979 				 int skip, struct pt_regs *regs);
980 static inline void ftrace_trace_stack(struct trace_array *tr,
981 				      struct trace_buffer *buffer,
982 				      unsigned int trace_ctx,
983 				      int skip, struct pt_regs *regs);
984 
985 #else
986 static inline void __ftrace_trace_stack(struct trace_array *tr,
987 					struct trace_buffer *buffer,
988 					unsigned int trace_ctx,
989 					int skip, struct pt_regs *regs)
990 {
991 }
992 static inline void ftrace_trace_stack(struct trace_array *tr,
993 				      struct trace_buffer *buffer,
994 				      unsigned long trace_ctx,
995 				      int skip, struct pt_regs *regs)
996 {
997 }
998 
999 #endif
1000 
1001 static __always_inline void
1002 trace_event_setup(struct ring_buffer_event *event,
1003 		  int type, unsigned int trace_ctx)
1004 {
1005 	struct trace_entry *ent = ring_buffer_event_data(event);
1006 
1007 	tracing_generic_entry_update(ent, type, trace_ctx);
1008 }
1009 
1010 static __always_inline struct ring_buffer_event *
1011 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
1012 			  int type,
1013 			  unsigned long len,
1014 			  unsigned int trace_ctx)
1015 {
1016 	struct ring_buffer_event *event;
1017 
1018 	event = ring_buffer_lock_reserve(buffer, len);
1019 	if (event != NULL)
1020 		trace_event_setup(event, type, trace_ctx);
1021 
1022 	return event;
1023 }
1024 
1025 void tracer_tracing_on(struct trace_array *tr)
1026 {
1027 	if (tr->array_buffer.buffer)
1028 		ring_buffer_record_on(tr->array_buffer.buffer);
1029 	/*
1030 	 * This flag is looked at when buffers haven't been allocated
1031 	 * yet, or by some tracers (like irqsoff), that just want to
1032 	 * know if the ring buffer has been disabled, but it can handle
1033  * races where it gets disabled but we still do a record.
1034 	 * As the check is in the fast path of the tracers, it is more
1035 	 * important to be fast than accurate.
1036 	 */
1037 	tr->buffer_disabled = 0;
1038 	/* Make the flag seen by readers */
1039 	smp_wmb();
1040 }
1041 
1042 /**
1043  * tracing_on - enable tracing buffers
1044  *
1045  * This function enables tracing buffers that may have been
1046  * disabled with tracing_off.
1047  */
1048 void tracing_on(void)
1049 {
1050 	tracer_tracing_on(&global_trace);
1051 }
1052 EXPORT_SYMBOL_GPL(tracing_on);
1053 
1054 
1055 static __always_inline void
1056 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
1057 {
1058 	__this_cpu_write(trace_taskinfo_save, true);
1059 
1060 	/* If this is the temp buffer, we need to commit fully */
1061 	if (this_cpu_read(trace_buffered_event) == event) {
1062 		/* Length is in event->array[0] */
1063 		ring_buffer_write(buffer, event->array[0], &event->array[1]);
1064 		/* Release the temp buffer */
1065 		this_cpu_dec(trace_buffered_event_cnt);
1066 		/* ring_buffer_unlock_commit() enables preemption */
1067 		preempt_enable_notrace();
1068 	} else
1069 		ring_buffer_unlock_commit(buffer);
1070 }
1071 
1072 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1073 		       const char *str, int size)
1074 {
1075 	struct ring_buffer_event *event;
1076 	struct trace_buffer *buffer;
1077 	struct print_entry *entry;
1078 	unsigned int trace_ctx;
1079 	int alloc;
1080 
1081 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1082 		return 0;
1083 
1084 	if (unlikely(tracing_selftest_running && tr == &global_trace))
1085 		return 0;
1086 
1087 	if (unlikely(tracing_disabled))
1088 		return 0;
1089 
1090 	alloc = sizeof(*entry) + size + 2; /* possible \n added */
1091 
1092 	trace_ctx = tracing_gen_ctx();
1093 	buffer = tr->array_buffer.buffer;
1094 	ring_buffer_nest_start(buffer);
1095 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1096 					    trace_ctx);
1097 	if (!event) {
1098 		size = 0;
1099 		goto out;
1100 	}
1101 
1102 	entry = ring_buffer_event_data(event);
1103 	entry->ip = ip;
1104 
1105 	memcpy(&entry->buf, str, size);
1106 
1107 	/* Add a newline if necessary */
1108 	if (entry->buf[size - 1] != '\n') {
1109 		entry->buf[size] = '\n';
1110 		entry->buf[size + 1] = '\0';
1111 	} else
1112 		entry->buf[size] = '\0';
1113 
1114 	__buffer_unlock_commit(buffer, event);
1115 	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1116  out:
1117 	ring_buffer_nest_end(buffer);
1118 	return size;
1119 }
1120 EXPORT_SYMBOL_GPL(__trace_array_puts);
1121 
1122 /**
1123  * __trace_puts - write a constant string into the trace buffer.
1124  * @ip:	   The address of the caller
1125  * @str:   The constant string to write
1126  * @size:  The size of the string.
1127  */
1128 int __trace_puts(unsigned long ip, const char *str, int size)
1129 {
1130 	return __trace_array_puts(printk_trace, ip, str, size);
1131 }
1132 EXPORT_SYMBOL_GPL(__trace_puts);
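
/*
 * Callers normally go through the trace_puts() macro, which supplies
 * @ip and @size, e.g.:
 *
 *	trace_puts("reached checkpoint A\n");
 */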
1133 
1134 /**
1135  * __trace_bputs - write the pointer to a constant string into trace buffer
1136  * @ip:	   The address of the caller
1137  * @str:   The constant string whose address is written to the buffer
1138  */
1139 int __trace_bputs(unsigned long ip, const char *str)
1140 {
1141 	struct trace_array *tr = READ_ONCE(printk_trace);
1142 	struct ring_buffer_event *event;
1143 	struct trace_buffer *buffer;
1144 	struct bputs_entry *entry;
1145 	unsigned int trace_ctx;
1146 	int size = sizeof(struct bputs_entry);
1147 	int ret = 0;
1148 
1149 	if (!printk_binsafe(tr))
1150 		return __trace_puts(ip, str, strlen(str));
1151 
1152 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1153 		return 0;
1154 
1155 	if (unlikely(tracing_selftest_running || tracing_disabled))
1156 		return 0;
1157 
1158 	trace_ctx = tracing_gen_ctx();
1159 	buffer = tr->array_buffer.buffer;
1160 
1161 	ring_buffer_nest_start(buffer);
1162 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1163 					    trace_ctx);
1164 	if (!event)
1165 		goto out;
1166 
1167 	entry = ring_buffer_event_data(event);
1168 	entry->ip			= ip;
1169 	entry->str			= str;
1170 
1171 	__buffer_unlock_commit(buffer, event);
1172 	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1173 
1174 	ret = 1;
1175  out:
1176 	ring_buffer_nest_end(buffer);
1177 	return ret;
1178 }
1179 EXPORT_SYMBOL_GPL(__trace_bputs);
1180 
1181 #ifdef CONFIG_TRACER_SNAPSHOT
1182 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1183 					   void *cond_data)
1184 {
1185 	struct tracer *tracer = tr->current_trace;
1186 	unsigned long flags;
1187 
1188 	if (in_nmi()) {
1189 		trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1190 		trace_array_puts(tr, "*** snapshot is being ignored        ***\n");
1191 		return;
1192 	}
1193 
1194 	if (!tr->allocated_snapshot) {
1195 		trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1196 		trace_array_puts(tr, "*** stopping trace here!   ***\n");
1197 		tracer_tracing_off(tr);
1198 		return;
1199 	}
1200 
1201 	/* Note, the snapshot cannot be used when the tracer itself uses it */
1202 	if (tracer->use_max_tr) {
1203 		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1204 		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1205 		return;
1206 	}
1207 
1208 	if (tr->mapped) {
1209 		trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
1210 		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1211 		return;
1212 	}
1213 
1214 	local_irq_save(flags);
1215 	update_max_tr(tr, current, smp_processor_id(), cond_data);
1216 	local_irq_restore(flags);
1217 }
1218 
1219 void tracing_snapshot_instance(struct trace_array *tr)
1220 {
1221 	tracing_snapshot_instance_cond(tr, NULL);
1222 }
1223 
1224 /**
1225  * tracing_snapshot - take a snapshot of the current buffer.
1226  *
1227  * This causes a swap between the snapshot buffer and the current live
1228  * tracing buffer. You can use this to take snapshots of the live
1229  * trace when some condition is triggered, but continue to trace.
1230  *
1231  * Note, make sure to allocate the snapshot either with
1232  * tracing_snapshot_alloc(), or by doing it manually
1233  * with: echo 1 > /sys/kernel/tracing/snapshot
1234  *
1235  * If the snapshot buffer is not allocated, this will stop tracing,
1236  * basically making a permanent snapshot.
1237  */
1238 void tracing_snapshot(void)
1239 {
1240 	struct trace_array *tr = &global_trace;
1241 
1242 	tracing_snapshot_instance(tr);
1243 }
1244 EXPORT_SYMBOL_GPL(tracing_snapshot);
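
/*
 * Example (illustrative; the latency variables are hypothetical):
 * capture the lead-up to a rare condition without stopping the trace:
 *
 *	if (unlikely(wakeup_latency > max_seen)) {
 *		max_seen = wakeup_latency;
 *		tracing_snapshot();
 *	}
 */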
1245 
1246 /**
1247  * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1248  * @tr:		The tracing instance to snapshot
1249  * @cond_data:	The data to be tested conditionally, and possibly saved
1250  *
1251  * This is the same as tracing_snapshot() except that the snapshot is
1252  * conditional - the snapshot will only happen if the
1253  * cond_snapshot.update() implementation receiving the cond_data
1254  * returns true, which means that the trace array's cond_snapshot
1255  * update() operation used the cond_data to determine whether the
1256  * snapshot should be taken, and if it was, presumably saved it along
1257  * with the snapshot.
1258  */
1259 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1260 {
1261 	tracing_snapshot_instance_cond(tr, cond_data);
1262 }
1263 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1264 
1265 /**
1266  * tracing_cond_snapshot_data - get the user data associated with a snapshot
1267  * @tr:		The tracing instance
1268  *
1269  * When the user enables a conditional snapshot using
1270  * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1271  * with the snapshot.  This accessor is used to retrieve it.
1272  *
1273  * Should not be called from cond_snapshot.update(), since it takes
1274  * the tr->max_lock lock, which the code calling
1275  * cond_snapshot.update() has already done.
1276  *
1277  * Returns the cond_data associated with the trace array's snapshot.
1278  */
1279 void *tracing_cond_snapshot_data(struct trace_array *tr)
1280 {
1281 	void *cond_data = NULL;
1282 
1283 	local_irq_disable();
1284 	arch_spin_lock(&tr->max_lock);
1285 
1286 	if (tr->cond_snapshot)
1287 		cond_data = tr->cond_snapshot->cond_data;
1288 
1289 	arch_spin_unlock(&tr->max_lock);
1290 	local_irq_enable();
1291 
1292 	return cond_data;
1293 }
1294 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1295 
1296 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1297 					struct array_buffer *size_buf, int cpu_id);
1298 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1299 
1300 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1301 {
1302 	int order;
1303 	int ret;
1304 
1305 	if (!tr->allocated_snapshot) {
1306 
1307 		/* Make the snapshot buffer have the same order as main buffer */
1308 		order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
1309 		ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
1310 		if (ret < 0)
1311 			return ret;
1312 
1313 		/* allocate spare buffer */
1314 		ret = resize_buffer_duplicate_size(&tr->max_buffer,
1315 				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1316 		if (ret < 0)
1317 			return ret;
1318 
1319 		tr->allocated_snapshot = true;
1320 	}
1321 
1322 	return 0;
1323 }
1324 
1325 static void free_snapshot(struct trace_array *tr)
1326 {
1327 	/*
1328 	 * We don't free the ring buffer; instead, we resize it, because
1329 	 * the max_tr ring buffer has some state (e.g. ring->clock) and
1330 	 * we want to preserve it.
1331 	 */
1332 	ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
1333 	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1334 	set_buffer_entries(&tr->max_buffer, 1);
1335 	tracing_reset_online_cpus(&tr->max_buffer);
1336 	tr->allocated_snapshot = false;
1337 }
1338 
1339 static int tracing_arm_snapshot_locked(struct trace_array *tr)
1340 {
1341 	int ret;
1342 
1343 	lockdep_assert_held(&trace_types_lock);
1344 
1345 	spin_lock(&tr->snapshot_trigger_lock);
1346 	if (tr->snapshot == UINT_MAX || tr->mapped) {
1347 		spin_unlock(&tr->snapshot_trigger_lock);
1348 		return -EBUSY;
1349 	}
1350 
1351 	tr->snapshot++;
1352 	spin_unlock(&tr->snapshot_trigger_lock);
1353 
1354 	ret = tracing_alloc_snapshot_instance(tr);
1355 	if (ret) {
1356 		spin_lock(&tr->snapshot_trigger_lock);
1357 		tr->snapshot--;
1358 		spin_unlock(&tr->snapshot_trigger_lock);
1359 	}
1360 
1361 	return ret;
1362 }
1363 
1364 int tracing_arm_snapshot(struct trace_array *tr)
1365 {
1366 	int ret;
1367 
1368 	mutex_lock(&trace_types_lock);
1369 	ret = tracing_arm_snapshot_locked(tr);
1370 	mutex_unlock(&trace_types_lock);
1371 
1372 	return ret;
1373 }
1374 
1375 void tracing_disarm_snapshot(struct trace_array *tr)
1376 {
1377 	spin_lock(&tr->snapshot_trigger_lock);
1378 	if (!WARN_ON(!tr->snapshot))
1379 		tr->snapshot--;
1380 	spin_unlock(&tr->snapshot_trigger_lock);
1381 }
1382 
1383 /**
1384  * tracing_alloc_snapshot - allocate snapshot buffer.
1385  *
1386  * This only allocates the snapshot buffer if it isn't already
1387  * allocated - it doesn't also take a snapshot.
1388  *
1389  * This is meant to be used in cases where the snapshot buffer needs
1390  * to be set up for events that can't sleep but need to be able to
1391  * trigger a snapshot.
1392  */
1393 int tracing_alloc_snapshot(void)
1394 {
1395 	struct trace_array *tr = &global_trace;
1396 	int ret;
1397 
1398 	ret = tracing_alloc_snapshot_instance(tr);
1399 	WARN_ON(ret < 0);
1400 
1401 	return ret;
1402 }
1403 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1404 
1405 /**
1406  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1407  *
1408  * This is similar to tracing_snapshot(), but it will allocate the
1409  * snapshot buffer if it isn't already allocated. Use this only
1410  * where it is safe to sleep, as the allocation may sleep.
1411  *
1412  * This causes a swap between the snapshot buffer and the current live
1413  * tracing buffer. You can use this to take snapshots of the live
1414  * trace when some condition is triggered, but continue to trace.
1415  */
1416 void tracing_snapshot_alloc(void)
1417 {
1418 	int ret;
1419 
1420 	ret = tracing_alloc_snapshot();
1421 	if (ret < 0)
1422 		return;
1423 
1424 	tracing_snapshot();
1425 }
1426 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1427 
1428 /**
1429  * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1430  * @tr:		The tracing instance
1431  * @cond_data:	User data to associate with the snapshot
1432  * @update:	Implementation of the cond_snapshot update function
1433  *
1434  * Check whether the conditional snapshot for the given instance has
1435  * already been enabled, or if the current tracer is already using a
1436  * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1437  * save the cond_data and update function inside.
1438  *
1439  * Returns 0 if successful, error otherwise.
1440  */
1441 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1442 				 cond_update_fn_t update)
1443 {
1444 	struct cond_snapshot *cond_snapshot __free(kfree) =
1445 		kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1446 	int ret;
1447 
1448 	if (!cond_snapshot)
1449 		return -ENOMEM;
1450 
1451 	cond_snapshot->cond_data = cond_data;
1452 	cond_snapshot->update = update;
1453 
1454 	guard(mutex)(&trace_types_lock);
1455 
1456 	if (tr->current_trace->use_max_tr)
1457 		return -EBUSY;
1458 
1459 	/*
1460 	 * The cond_snapshot can only change to NULL without the
1461 	 * trace_types_lock. We don't care if we race with it going
1462 	 * to NULL, but we want to make sure that it's not set to
1463 	 * something other than NULL when we get here, which we can
1464 	 * do safely with only holding the trace_types_lock and not
1465 	 * having to take the max_lock.
1466 	 */
1467 	if (tr->cond_snapshot)
1468 		return -EBUSY;
1469 
1470 	ret = tracing_arm_snapshot_locked(tr);
1471 	if (ret)
1472 		return ret;
1473 
1474 	local_irq_disable();
1475 	arch_spin_lock(&tr->max_lock);
1476 	tr->cond_snapshot = no_free_ptr(cond_snapshot);
1477 	arch_spin_unlock(&tr->max_lock);
1478 	local_irq_enable();
1479 
1480 	return 0;
1481 }
1482 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
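
/*
 * Example (hypothetical, a minimal sketch): only take a snapshot when
 * the update() callback approves the cond_data:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_state *s = cond_data;
 *
 *		return s->value > s->threshold;
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_state, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_state);
 */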
1483 
1484 /**
1485  * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1486  * @tr:		The tracing instance
1487  *
1488  * Check whether the conditional snapshot for the given instance is
1489  * enabled; if so, free the cond_snapshot associated with it,
1490  * otherwise return -EINVAL.
1491  *
1492  * Returns 0 if successful, error otherwise.
1493  */
1494 int tracing_snapshot_cond_disable(struct trace_array *tr)
1495 {
1496 	int ret = 0;
1497 
1498 	local_irq_disable();
1499 	arch_spin_lock(&tr->max_lock);
1500 
1501 	if (!tr->cond_snapshot)
1502 		ret = -EINVAL;
1503 	else {
1504 		kfree(tr->cond_snapshot);
1505 		tr->cond_snapshot = NULL;
1506 	}
1507 
1508 	arch_spin_unlock(&tr->max_lock);
1509 	local_irq_enable();
1510 
1511 	tracing_disarm_snapshot(tr);
1512 
1513 	return ret;
1514 }
1515 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1516 #else
1517 void tracing_snapshot(void)
1518 {
1519 	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1520 }
1521 EXPORT_SYMBOL_GPL(tracing_snapshot);
1522 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1523 {
1524 	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1525 }
1526 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1527 int tracing_alloc_snapshot(void)
1528 {
1529 	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1530 	return -ENODEV;
1531 }
1532 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1533 void tracing_snapshot_alloc(void)
1534 {
1535 	/* Give warning */
1536 	tracing_snapshot();
1537 }
1538 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1539 void *tracing_cond_snapshot_data(struct trace_array *tr)
1540 {
1541 	return NULL;
1542 }
1543 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1544 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1545 {
1546 	return -ENODEV;
1547 }
1548 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1549 int tracing_snapshot_cond_disable(struct trace_array *tr)
1550 {
1551 	return false;
1552 }
1553 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1554 #define free_snapshot(tr)	do { } while (0)
1555 #define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
1556 #endif /* CONFIG_TRACER_SNAPSHOT */
1557 
1558 void tracer_tracing_off(struct trace_array *tr)
1559 {
1560 	if (tr->array_buffer.buffer)
1561 		ring_buffer_record_off(tr->array_buffer.buffer);
1562 	/*
1563 	 * This flag is looked at when buffers haven't been allocated
1564 	 * yet, or by some tracers (like irqsoff), that just want to
1565 	 * know if the ring buffer has been disabled, but it can handle
1566  * races where it gets disabled but we still do a record.
1567 	 * As the check is in the fast path of the tracers, it is more
1568 	 * important to be fast than accurate.
1569 	 */
1570 	tr->buffer_disabled = 1;
1571 	/* Make the flag seen by readers */
1572 	smp_wmb();
1573 }
1574 
1575 /**
1576  * tracing_off - turn off tracing buffers
1577  *
1578  * This function stops the tracing buffers from recording data.
1579  * It does not disable any overhead the tracers themselves may
1580  * be causing. This function simply causes all recording to
1581  * the ring buffers to fail.
1582  */
1583 void tracing_off(void)
1584 {
1585 	tracer_tracing_off(&global_trace);
1586 }
1587 EXPORT_SYMBOL_GPL(tracing_off);
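
/*
 * Example (illustrative; do_something_suspicious() is hypothetical):
 * bracket a suspect code path so the buffers freeze right after it
 * runs, preserving the events for post-mortem reading:
 *
 *	tracing_on();
 *	do_something_suspicious();
 *	tracing_off();
 */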
1588 
1589 void disable_trace_on_warning(void)
1590 {
1591 	if (__disable_trace_on_warning) {
1592 		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1593 			"Disabling tracing due to warning\n");
1594 		tracing_off();
1595 	}
1596 }
1597 
1598 /**
1599  * tracer_tracing_is_on - show the real state of the ring buffer
1600  * @tr: the trace array whose ring buffer state is queried
1601  *
1602  * Shows the real state of the ring buffer: whether it is enabled or not.
1603  */
1604 bool tracer_tracing_is_on(struct trace_array *tr)
1605 {
1606 	if (tr->array_buffer.buffer)
1607 		return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
1608 	return !tr->buffer_disabled;
1609 }
1610 
1611 /**
1612  * tracing_is_on - show the state of the global ring buffers
1613  */
1614 int tracing_is_on(void)
1615 {
1616 	return tracer_tracing_is_on(&global_trace);
1617 }
1618 EXPORT_SYMBOL_GPL(tracing_is_on);
1619 
1620 static int __init set_buf_size(char *str)
1621 {
1622 	unsigned long buf_size;
1623 
1624 	if (!str)
1625 		return 0;
1626 	buf_size = memparse(str, &str);
1627 	/*
1628 	 * nr_entries cannot be zero and the startup
1629 	 * tests require some buffer space. Therefore
1630 	 * ensure we have at least 4096 bytes of buffer.
1631 	 */
1632 	trace_buf_size = max(4096UL, buf_size);
1633 	return 1;
1634 }
1635 __setup("trace_buf_size=", set_buf_size);
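
/*
 * Example (illustrative): "trace_buf_size=16M" uses a memparse()
 * suffix, while "trace_buf_size=1048576" gives bytes directly; values
 * below 4096 are raised to 4096.
 */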
1636 
1637 static int __init set_tracing_thresh(char *str)
1638 {
1639 	unsigned long threshold;
1640 	int ret;
1641 
1642 	if (!str)
1643 		return 0;
1644 	ret = kstrtoul(str, 0, &threshold);
1645 	if (ret < 0)
1646 		return 0;
1647 	tracing_thresh = threshold * 1000;
1648 	return 1;
1649 }
1650 __setup("tracing_thresh=", set_tracing_thresh);
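
/*
 * Example (illustrative): "tracing_thresh=100" sets a 100 usec
 * threshold, stored internally as 100000 ns.
 */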
1651 
1652 unsigned long nsecs_to_usecs(unsigned long nsecs)
1653 {
1654 	return nsecs / 1000;
1655 }
1656 
1657 /*
1658  * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1659  * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1660  * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1661  * of strings in the order that the evals (enum) were defined.
1662  */
1663 #undef C
1664 #define C(a, b) b
1665 
1666 /* These must match the bit positions in trace_iterator_flags */
1667 static const char *trace_options[] = {
1668 	TRACE_FLAGS
1669 	NULL
1670 };
1671 
1672 static struct {
1673 	u64 (*func)(void);
1674 	const char *name;
1675 	int in_ns;		/* is this clock in nanoseconds? */
1676 } trace_clocks[] = {
1677 	{ trace_clock_local,		"local",	1 },
1678 	{ trace_clock_global,		"global",	1 },
1679 	{ trace_clock_counter,		"counter",	0 },
1680 	{ trace_clock_jiffies,		"uptime",	0 },
1681 	{ trace_clock,			"perf",		1 },
1682 	{ ktime_get_mono_fast_ns,	"mono",		1 },
1683 	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
1684 	{ ktime_get_boot_fast_ns,	"boot",		1 },
1685 	{ ktime_get_tai_fast_ns,	"tai",		1 },
1686 	ARCH_TRACE_CLOCKS
1687 };
1688 
1689 bool trace_clock_in_ns(struct trace_array *tr)
1690 {
1691 	if (trace_clocks[tr->clock_id].in_ns)
1692 		return true;
1693 
1694 	return false;
1695 }
1696 
1697 /*
1698  * trace_parser_get_init - gets the buffer for trace parser
1699  */
1700 int trace_parser_get_init(struct trace_parser *parser, int size)
1701 {
1702 	memset(parser, 0, sizeof(*parser));
1703 
1704 	parser->buffer = kmalloc(size, GFP_KERNEL);
1705 	if (!parser->buffer)
1706 		return 1;
1707 
1708 	parser->size = size;
1709 	return 0;
1710 }
1711 
1712 /*
1713  * trace_parser_put - frees the buffer for trace parser
1714  */
1715 void trace_parser_put(struct trace_parser *parser)
1716 {
1717 	kfree(parser->buffer);
1718 	parser->buffer = NULL;
1719 }
1720 
1721 /*
1722  * trace_get_user - reads the user input string separated by space
1723  * (matched by isspace(ch))
1724  *
1725  * For each string found the 'struct trace_parser' is updated,
1726  * and the function returns.
1727  *
1728  * Returns number of bytes read.
1729  *
1730  * See kernel/trace/trace.h for 'struct trace_parser' details.
1731  */
1732 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1733 	size_t cnt, loff_t *ppos)
1734 {
1735 	char ch;
1736 	size_t read = 0;
1737 	ssize_t ret;
1738 
1739 	if (!*ppos)
1740 		trace_parser_clear(parser);
1741 
1742 	ret = get_user(ch, ubuf++);
1743 	if (ret)
1744 		goto out;
1745 
1746 	read++;
1747 	cnt--;
1748 
1749 	/*
1750 	 * The parser is not finished with the last write,
1751 	 * continue reading the user input without skipping spaces.
1752 	 */
1753 	if (!parser->cont) {
1754 		/* skip white space */
1755 		while (cnt && isspace(ch)) {
1756 			ret = get_user(ch, ubuf++);
1757 			if (ret)
1758 				goto out;
1759 			read++;
1760 			cnt--;
1761 		}
1762 
1763 		parser->idx = 0;
1764 
1765 		/* only spaces were written */
1766 		if (isspace(ch) || !ch) {
1767 			*ppos += read;
1768 			ret = read;
1769 			goto out;
1770 		}
1771 	}
1772 
1773 	/* read the non-space input */
1774 	while (cnt && !isspace(ch) && ch) {
1775 		if (parser->idx < parser->size - 1)
1776 			parser->buffer[parser->idx++] = ch;
1777 		else {
1778 			ret = -EINVAL;
1779 			goto out;
1780 		}
1781 		ret = get_user(ch, ubuf++);
1782 		if (ret)
1783 			goto out;
1784 		read++;
1785 		cnt--;
1786 	}
1787 
1788 	/* We either got finished input or we have to wait for another call. */
1789 	if (isspace(ch) || !ch) {
1790 		parser->buffer[parser->idx] = 0;
1791 		parser->cont = false;
1792 	} else if (parser->idx < parser->size - 1) {
1793 		parser->cont = true;
1794 		parser->buffer[parser->idx++] = ch;
1795 		/* Make sure the parsed string always terminates with '\0'. */
1796 		parser->buffer[parser->idx] = 0;
1797 	} else {
1798 		ret = -EINVAL;
1799 		goto out;
1800 	}
1801 
1802 	*ppos += read;
1803 	ret = read;
1804 
1805 out:
1806 	return ret;
1807 }
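
/*
 * Example: a write of "foo bar" is consumed one token per call; the
 * first call fills parser->buffer with "foo" and the second with
 * "bar". If a token is cut off at the end of the write, parser->cont
 * is set so the next call continues that token instead of skipping
 * leading spaces.
 */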
1808 
1809 /* TODO add a seq_buf_to_buffer() */
1810 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1811 {
1812 	int len;
1813 
1814 	if (trace_seq_used(s) <= s->readpos)
1815 		return -EBUSY;
1816 
1817 	len = trace_seq_used(s) - s->readpos;
1818 	if (cnt > len)
1819 		cnt = len;
1820 	memcpy(buf, s->buffer + s->readpos, cnt);
1821 
1822 	s->readpos += cnt;
1823 	return cnt;
1824 }
1825 
1826 unsigned long __read_mostly	tracing_thresh;
1827 
1828 #ifdef CONFIG_TRACER_MAX_TRACE
1829 static const struct file_operations tracing_max_lat_fops;
1830 
1831 #ifdef LATENCY_FS_NOTIFY
1832 
1833 static struct workqueue_struct *fsnotify_wq;
1834 
1835 static void latency_fsnotify_workfn(struct work_struct *work)
1836 {
1837 	struct trace_array *tr = container_of(work, struct trace_array,
1838 					      fsnotify_work);
1839 	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1840 }
1841 
1842 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1843 {
1844 	struct trace_array *tr = container_of(iwork, struct trace_array,
1845 					      fsnotify_irqwork);
1846 	queue_work(fsnotify_wq, &tr->fsnotify_work);
1847 }
1848 
1849 static void trace_create_maxlat_file(struct trace_array *tr,
1850 				     struct dentry *d_tracer)
1851 {
1852 	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1853 	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1854 	tr->d_max_latency = trace_create_file("tracing_max_latency",
1855 					      TRACE_MODE_WRITE,
1856 					      d_tracer, tr,
1857 					      &tracing_max_lat_fops);
1858 }
1859 
1860 __init static int latency_fsnotify_init(void)
1861 {
1862 	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1863 				      WQ_UNBOUND | WQ_HIGHPRI, 0);
1864 	if (!fsnotify_wq) {
1865 		pr_err("Unable to allocate tr_max_lat_wq\n");
1866 		return -ENOMEM;
1867 	}
1868 	return 0;
1869 }
1870 
1871 late_initcall_sync(latency_fsnotify_init);
1872 
1873 void latency_fsnotify(struct trace_array *tr)
1874 {
1875 	if (!fsnotify_wq)
1876 		return;
1877 	/*
1878 	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1879 	 * possible that we are called from __schedule() or do_idle(), which
1880 	 * could cause a deadlock.
1881 	 */
1882 	irq_work_queue(&tr->fsnotify_irqwork);
1883 }
1884 
1885 #else /* !LATENCY_FS_NOTIFY */
1886 
1887 #define trace_create_maxlat_file(tr, d_tracer)				\
1888 	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
1889 			  d_tracer, tr, &tracing_max_lat_fops)
1890 
1891 #endif
1892 
1893 /*
1894  * Copy the new maximum trace into the separate maximum-trace
1895  * structure. (This way the maximum trace is permanently saved
1896  * for later retrieval via /sys/kernel/tracing/tracing_max_latency.)
1897  */
1898 static void
1899 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1900 {
1901 	struct array_buffer *trace_buf = &tr->array_buffer;
1902 	struct array_buffer *max_buf = &tr->max_buffer;
1903 	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1904 	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1905 
1906 	max_buf->cpu = cpu;
1907 	max_buf->time_start = data->preempt_timestamp;
1908 
1909 	max_data->saved_latency = tr->max_latency;
1910 	max_data->critical_start = data->critical_start;
1911 	max_data->critical_end = data->critical_end;
1912 
1913 	strscpy(max_data->comm, tsk->comm);
1914 	max_data->pid = tsk->pid;
1915 	/*
1916 	 * If tsk == current, then use current_uid(), as that does not use
1917 	 * RCU. The irq tracer can be called out of RCU scope.
1918 	 */
1919 	if (tsk == current)
1920 		max_data->uid = current_uid();
1921 	else
1922 		max_data->uid = task_uid(tsk);
1923 
1924 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1925 	max_data->policy = tsk->policy;
1926 	max_data->rt_priority = tsk->rt_priority;
1927 
1928 	/* Record this task's comm */
1929 	tracing_record_cmdline(tsk);
1930 	latency_fsnotify(tr);
1931 }
1932 
1933 /**
1934  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1935  * @tr: The trace array to snapshot
1936  * @tsk: the task with the latency
1937  * @cpu: The cpu that initiated the trace.
1938  * @cond_data: User data associated with a conditional snapshot
1939  *
1940  * Flip the buffers between the @tr and the max_tr and record information
1941  * about which task was the cause of this latency.
1942  */
1943 void
1944 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1945 	      void *cond_data)
1946 {
1947 	if (tr->stop_count)
1948 		return;
1949 
1950 	WARN_ON_ONCE(!irqs_disabled());
1951 
1952 	if (!tr->allocated_snapshot) {
1953 		/* Only the nop tracer should hit this when disabling */
1954 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1955 		return;
1956 	}
1957 
1958 	arch_spin_lock(&tr->max_lock);
1959 
1960 	/* Inherit the recordable setting from array_buffer */
1961 	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1962 		ring_buffer_record_on(tr->max_buffer.buffer);
1963 	else
1964 		ring_buffer_record_off(tr->max_buffer.buffer);
1965 
1966 #ifdef CONFIG_TRACER_SNAPSHOT
1967 	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1968 		arch_spin_unlock(&tr->max_lock);
1969 		return;
1970 	}
1971 #endif
1972 	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1973 
1974 	__update_max_tr(tr, tsk, cpu);
1975 
1976 	arch_spin_unlock(&tr->max_lock);
1977 
1978 	/* Any waiters on the old snapshot buffer need to wake up */
1979 	ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
1980 }
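
/*
 * Editor's sketch (hypothetical caller, not part of the original
 * file): a latency tracer typically updates the maximum with
 * interrupts disabled, roughly:
 *
 *   if (delta > tr->max_latency) {
 *           tr->max_latency = delta;
 *           update_max_tr(tr, current, smp_processor_id(), NULL);
 *   }
 *
 * The wakeup tracer in trace_sched_wakeup.c does roughly this.
 */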
1981 
1982 /**
1983  * update_max_tr_single - only copy one trace over, and reset the rest
1984  * @tr: tracer
1985  * @tsk: task with the latency
1986  * @cpu: the cpu of the buffer to copy.
1987  *
1988  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1989  */
1990 void
1991 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1992 {
1993 	int ret;
1994 
1995 	if (tr->stop_count)
1996 		return;
1997 
1998 	WARN_ON_ONCE(!irqs_disabled());
1999 	if (!tr->allocated_snapshot) {
2000 		/* Only the nop tracer should hit this when disabling */
2001 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
2002 		return;
2003 	}
2004 
2005 	arch_spin_lock(&tr->max_lock);
2006 
2007 	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
2008 
2009 	if (ret == -EBUSY) {
2010 		/*
2011 		 * We failed to swap the buffer, either because a commit
2012 		 * was taking place on this CPU or because a resize is in
2013 		 * progress. We fail to record, but we reset the max trace
2014 		 * buffer (no one writes directly to it) and flag that it
2015 		 * failed.
2016 		 */
2017 		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
2018 			"Failed to swap buffers due to commit or resize in progress\n");
2019 	}
2020 
2021 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
2022 
2023 	__update_max_tr(tr, tsk, cpu);
2024 	arch_spin_unlock(&tr->max_lock);
2025 }
2026 
2027 #endif /* CONFIG_TRACER_MAX_TRACE */
2028 
2029 struct pipe_wait {
2030 	struct trace_iterator		*iter;
2031 	int				wait_index;
2032 };
2033 
2034 static bool wait_pipe_cond(void *data)
2035 {
2036 	struct pipe_wait *pwait = data;
2037 	struct trace_iterator *iter = pwait->iter;
2038 
2039 	if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index)
2040 		return true;
2041 
2042 	return iter->closed;
2043 }
2044 
2045 static int wait_on_pipe(struct trace_iterator *iter, int full)
2046 {
2047 	struct pipe_wait pwait;
2048 	int ret;
2049 
2050 	/* Iterators are static; they are either filled or empty */
2051 	if (trace_buffer_iter(iter, iter->cpu_file))
2052 		return 0;
2053 
2054 	pwait.wait_index = atomic_read_acquire(&iter->wait_index);
2055 	pwait.iter = iter;
2056 
2057 	ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full,
2058 			       wait_pipe_cond, &pwait);
2059 
2060 #ifdef CONFIG_TRACER_MAX_TRACE
2061 	/*
2062 	 * Make sure this is still the snapshot buffer, as if a snapshot were
2063 	 * to happen, this would now be the main buffer.
2064 	 */
2065 	if (iter->snapshot)
2066 		iter->array_buffer = &iter->tr->max_buffer;
2067 #endif
2068 	return ret;
2069 }
2070 
2071 #ifdef CONFIG_FTRACE_STARTUP_TEST
2072 static bool selftests_can_run;
2073 
2074 struct trace_selftests {
2075 	struct list_head		list;
2076 	struct tracer			*type;
2077 };
2078 
2079 static LIST_HEAD(postponed_selftests);
2080 
2081 static int save_selftest(struct tracer *type)
2082 {
2083 	struct trace_selftests *selftest;
2084 
2085 	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
2086 	if (!selftest)
2087 		return -ENOMEM;
2088 
2089 	selftest->type = type;
2090 	list_add(&selftest->list, &postponed_selftests);
2091 	return 0;
2092 }
2093 
2094 static int run_tracer_selftest(struct tracer *type)
2095 {
2096 	struct trace_array *tr = &global_trace;
2097 	struct tracer *saved_tracer = tr->current_trace;
2098 	int ret;
2099 
2100 	if (!type->selftest || tracing_selftest_disabled)
2101 		return 0;
2102 
2103 	/*
2104 	 * If a tracer registers early in boot up (before scheduling is
2105 	 * initialized and such), then do not run its selftest yet.
2106 	 * Instead, run it a little later in the boot process.
2107 	 */
2108 	if (!selftests_can_run)
2109 		return save_selftest(type);
2110 
2111 	if (!tracing_is_on()) {
2112 		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
2113 			type->name);
2114 		return 0;
2115 	}
2116 
2117 	/*
2118 	 * Run a selftest on this tracer.
2119 	 * Here we reset the trace buffer, and set the current
2120 	 * tracer to be this tracer. The tracer can then run some
2121 	 * internal tracing to verify that everything is in order.
2122 	 * If we fail, we do not register this tracer.
2123 	 */
2124 	tracing_reset_online_cpus(&tr->array_buffer);
2125 
2126 	tr->current_trace = type;
2127 
2128 #ifdef CONFIG_TRACER_MAX_TRACE
2129 	if (type->use_max_tr) {
2130 		/* If we expanded the buffers, make sure the max is expanded too */
2131 		if (tr->ring_buffer_expanded)
2132 			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2133 					   RING_BUFFER_ALL_CPUS);
2134 		tr->allocated_snapshot = true;
2135 	}
2136 #endif
2137 
2138 	/* the test is responsible for initializing and enabling */
2139 	pr_info("Testing tracer %s: ", type->name);
2140 	ret = type->selftest(type, tr);
2141 	/* the test is responsible for resetting too */
2142 	tr->current_trace = saved_tracer;
2143 	if (ret) {
2144 		printk(KERN_CONT "FAILED!\n");
2145 		/* Add the warning after printing 'FAILED' */
2146 		WARN_ON(1);
2147 		return -1;
2148 	}
2149 	/* Only reset on passing, to avoid touching corrupted buffers */
2150 	tracing_reset_online_cpus(&tr->array_buffer);
2151 
2152 #ifdef CONFIG_TRACER_MAX_TRACE
2153 	if (type->use_max_tr) {
2154 		tr->allocated_snapshot = false;
2155 
2156 		/* Shrink the max buffer again */
2157 		if (tr->ring_buffer_expanded)
2158 			ring_buffer_resize(tr->max_buffer.buffer, 1,
2159 					   RING_BUFFER_ALL_CPUS);
2160 	}
2161 #endif
2162 
2163 	printk(KERN_CONT "PASSED\n");
2164 	return 0;
2165 }
2166 
2167 static int do_run_tracer_selftest(struct tracer *type)
2168 {
2169 	int ret;
2170 
2171 	/*
2172 	 * Tests can take a long time, especially if they are run one after the
2173 	 * other, as does happen during bootup when all the tracers are
2174 	 * registered. This could cause the soft lockup watchdog to trigger.
2175 	 */
2176 	cond_resched();
2177 
2178 	tracing_selftest_running = true;
2179 	ret = run_tracer_selftest(type);
2180 	tracing_selftest_running = false;
2181 
2182 	return ret;
2183 }
2184 
2185 static __init int init_trace_selftests(void)
2186 {
2187 	struct trace_selftests *p, *n;
2188 	struct tracer *t, **last;
2189 	int ret;
2190 
2191 	selftests_can_run = true;
2192 
2193 	guard(mutex)(&trace_types_lock);
2194 
2195 	if (list_empty(&postponed_selftests))
2196 		return 0;
2197 
2198 	pr_info("Running postponed tracer tests:\n");
2199 
2200 	tracing_selftest_running = true;
2201 	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2202 		/* This loop can take minutes when sanitizers are enabled,
2203 		 * so let's make sure we allow RCU processing.
2204 		 */
2205 		cond_resched();
2206 		ret = run_tracer_selftest(p->type);
2207 		/* If the test fails, then warn and remove from available_tracers */
2208 		if (ret < 0) {
2209 			WARN(1, "tracer: %s failed selftest, disabling\n",
2210 			     p->type->name);
2211 			last = &trace_types;
2212 			for (t = trace_types; t; t = t->next) {
2213 				if (t == p->type) {
2214 					*last = t->next;
2215 					break;
2216 				}
2217 				last = &t->next;
2218 			}
2219 		}
2220 		list_del(&p->list);
2221 		kfree(p);
2222 	}
2223 	tracing_selftest_running = false;
2224 
2225 	return 0;
2226 }
2227 core_initcall(init_trace_selftests);
2228 #else
2229 static inline int do_run_tracer_selftest(struct tracer *type)
2230 {
2231 	return 0;
2232 }
2233 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2234 
2235 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2236 
2237 static void __init apply_trace_boot_options(void);
2238 
2239 /**
2240  * register_tracer - register a tracer with the ftrace system.
2241  * @type: the plugin for the tracer
2242  *
2243  * Register a new plugin tracer.
2244  */
2245 int __init register_tracer(struct tracer *type)
2246 {
2247 	struct tracer *t;
2248 	int ret = 0;
2249 
2250 	if (!type->name) {
2251 		pr_info("Tracer must have a name\n");
2252 		return -1;
2253 	}
2254 
2255 	if (strlen(type->name) >= MAX_TRACER_SIZE) {
2256 		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2257 		return -1;
2258 	}
2259 
2260 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
2261 		pr_warn("Can not register tracer %s due to lockdown\n",
2262 			   type->name);
2263 		return -EPERM;
2264 	}
2265 
2266 	mutex_lock(&trace_types_lock);
2267 
2268 	for (t = trace_types; t; t = t->next) {
2269 		if (strcmp(type->name, t->name) == 0) {
2270 			/* already found */
2271 			pr_info("Tracer %s already registered\n",
2272 				type->name);
2273 			ret = -1;
2274 			goto out;
2275 		}
2276 	}
2277 
2278 	if (!type->set_flag)
2279 		type->set_flag = &dummy_set_flag;
2280 	if (!type->flags) {
2281 		/* Allocate a dummy tracer_flags */
2282 		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2283 		if (!type->flags) {
2284 			ret = -ENOMEM;
2285 			goto out;
2286 		}
2287 		type->flags->val = 0;
2288 		type->flags->opts = dummy_tracer_opt;
2289 	} else if (!type->flags->opts)
2290 		type->flags->opts = dummy_tracer_opt;
2292 
2293 	/* store the tracer for __set_tracer_option */
2294 	type->flags->trace = type;
2295 
2296 	ret = do_run_tracer_selftest(type);
2297 	if (ret < 0)
2298 		goto out;
2299 
2300 	type->next = trace_types;
2301 	trace_types = type;
2302 	add_tracer_options(&global_trace, type);
2303 
2304  out:
2305 	mutex_unlock(&trace_types_lock);
2306 
2307 	if (ret || !default_bootup_tracer)
2308 		goto out_unlock;
2309 
2310 	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2311 		goto out_unlock;
2312 
2313 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2314 	/* Do we want this tracer to start on bootup? */
2315 	tracing_set_tracer(&global_trace, type->name);
2316 	default_bootup_tracer = NULL;
2317 
2318 	apply_trace_boot_options();
2319 
2320 	/* Disable other selftests, since running this tracer will break them. */
2321 	disable_tracing_selftest("running a tracer");
2322 
2323  out_unlock:
2324 	return ret;
2325 }
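
/*
 * Editor's sketch (hypothetical built-in tracer, not part of the
 * original file): the minimum a plugin needs before calling
 * register_tracer() is a name plus init/reset callbacks; missing
 * ->set_flag and ->flags members are filled in with dummies above:
 *
 *   static int my_tracer_init(struct trace_array *tr)
 *   {
 *           return 0;
 *   }
 *
 *   static void my_tracer_reset(struct trace_array *tr)
 *   {
 *   }
 *
 *   static struct tracer my_tracer __read_mostly = {
 *           .name  = "my_tracer",
 *           .init  = my_tracer_init,
 *           .reset = my_tracer_reset,
 *   };
 *
 *   register_tracer(&my_tracer);   // from an __init function
 */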
2326 
2327 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2328 {
2329 	struct trace_buffer *buffer = buf->buffer;
2330 
2331 	if (!buffer)
2332 		return;
2333 
2334 	ring_buffer_record_disable(buffer);
2335 
2336 	/* Make sure all commits have finished */
2337 	synchronize_rcu();
2338 	ring_buffer_reset_cpu(buffer, cpu);
2339 
2340 	ring_buffer_record_enable(buffer);
2341 }
2342 
2343 void tracing_reset_online_cpus(struct array_buffer *buf)
2344 {
2345 	struct trace_buffer *buffer = buf->buffer;
2346 
2347 	if (!buffer)
2348 		return;
2349 
2350 	ring_buffer_record_disable(buffer);
2351 
2352 	/* Make sure all commits have finished */
2353 	synchronize_rcu();
2354 
2355 	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2356 
2357 	ring_buffer_reset_online_cpus(buffer);
2358 
2359 	ring_buffer_record_enable(buffer);
2360 }
2361 
2362 static void tracing_reset_all_cpus(struct array_buffer *buf)
2363 {
2364 	struct trace_buffer *buffer = buf->buffer;
2365 
2366 	if (!buffer)
2367 		return;
2368 
2369 	ring_buffer_record_disable(buffer);
2370 
2371 	/* Make sure all commits have finished */
2372 	synchronize_rcu();
2373 
2374 	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2375 
2376 	ring_buffer_reset(buffer);
2377 
2378 	ring_buffer_record_enable(buffer);
2379 }
2380 
2381 /* Must have trace_types_lock held */
2382 void tracing_reset_all_online_cpus_unlocked(void)
2383 {
2384 	struct trace_array *tr;
2385 
2386 	lockdep_assert_held(&trace_types_lock);
2387 
2388 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2389 		if (!tr->clear_trace)
2390 			continue;
2391 		tr->clear_trace = false;
2392 		tracing_reset_online_cpus(&tr->array_buffer);
2393 #ifdef CONFIG_TRACER_MAX_TRACE
2394 		tracing_reset_online_cpus(&tr->max_buffer);
2395 #endif
2396 	}
2397 }
2398 
2399 void tracing_reset_all_online_cpus(void)
2400 {
2401 	mutex_lock(&trace_types_lock);
2402 	tracing_reset_all_online_cpus_unlocked();
2403 	mutex_unlock(&trace_types_lock);
2404 }
2405 
2406 int is_tracing_stopped(void)
2407 {
2408 	return global_trace.stop_count;
2409 }
2410 
2411 static void tracing_start_tr(struct trace_array *tr)
2412 {
2413 	struct trace_buffer *buffer;
2414 	unsigned long flags;
2415 
2416 	if (tracing_disabled)
2417 		return;
2418 
2419 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2420 	if (--tr->stop_count) {
2421 		if (WARN_ON_ONCE(tr->stop_count < 0)) {
2422 			/* Someone screwed up their debugging */
2423 			tr->stop_count = 0;
2424 		}
2425 		goto out;
2426 	}
2427 
2428 	/* Prevent the buffers from switching */
2429 	arch_spin_lock(&tr->max_lock);
2430 
2431 	buffer = tr->array_buffer.buffer;
2432 	if (buffer)
2433 		ring_buffer_record_enable(buffer);
2434 
2435 #ifdef CONFIG_TRACER_MAX_TRACE
2436 	buffer = tr->max_buffer.buffer;
2437 	if (buffer)
2438 		ring_buffer_record_enable(buffer);
2439 #endif
2440 
2441 	arch_spin_unlock(&tr->max_lock);
2442 
2443  out:
2444 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2445 }
2446 
2447 /**
2448  * tracing_start - quick start of the tracer
2449  *
2450  * If tracing is enabled but was stopped by tracing_stop,
2451  * this will start the tracer back up.
2452  */
2453 void tracing_start(void)
2454 {
2455 	tracing_start_tr(&global_trace);
2457 }
2458 
2459 static void tracing_stop_tr(struct trace_array *tr)
2460 {
2461 	struct trace_buffer *buffer;
2462 	unsigned long flags;
2463 
2464 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2465 	if (tr->stop_count++)
2466 		goto out;
2467 
2468 	/* Prevent the buffers from switching */
2469 	arch_spin_lock(&tr->max_lock);
2470 
2471 	buffer = tr->array_buffer.buffer;
2472 	if (buffer)
2473 		ring_buffer_record_disable(buffer);
2474 
2475 #ifdef CONFIG_TRACER_MAX_TRACE
2476 	buffer = tr->max_buffer.buffer;
2477 	if (buffer)
2478 		ring_buffer_record_disable(buffer);
2479 #endif
2480 
2481 	arch_spin_unlock(&tr->max_lock);
2482 
2483  out:
2484 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2485 }
2486 
2487 /**
2488  * tracing_stop - quick stop of the tracer
2489  *
2490  * Light weight way to stop tracing. Use in conjunction with
2491  * tracing_start.
2492  */
2493 void tracing_stop(void)
2494 {
2495 	tracing_stop_tr(&global_trace);
2496 }
2497 
2498 /*
2499  * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2500  * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2501  * simplifies those functions and keeps them in sync.
2502  */
2503 enum print_line_t trace_handle_return(struct trace_seq *s)
2504 {
2505 	return trace_seq_has_overflowed(s) ?
2506 		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2507 }
2508 EXPORT_SYMBOL_GPL(trace_handle_return);
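
/*
 * Editor's sketch (hypothetical output callback, not part of the
 * original file): print handlers write into iter->seq and let
 * trace_handle_return() map an overflow to TRACE_TYPE_PARTIAL_LINE:
 *
 *   static enum print_line_t
 *   my_event_print(struct trace_iterator *iter, int flags,
 *                  struct trace_event *event)
 *   {
 *           trace_seq_printf(&iter->seq, "my event\n");
 *           return trace_handle_return(&iter->seq);
 *   }
 */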
2509 
2510 static unsigned short migration_disable_value(void)
2511 {
2512 #if defined(CONFIG_SMP)
2513 	return current->migration_disabled;
2514 #else
2515 	return 0;
2516 #endif
2517 }
2518 
2519 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2520 {
2521 	unsigned int trace_flags = irqs_status;
2522 	unsigned int pc;
2523 
2524 	pc = preempt_count();
2525 
2526 	if (pc & NMI_MASK)
2527 		trace_flags |= TRACE_FLAG_NMI;
2528 	if (pc & HARDIRQ_MASK)
2529 		trace_flags |= TRACE_FLAG_HARDIRQ;
2530 	if (in_serving_softirq())
2531 		trace_flags |= TRACE_FLAG_SOFTIRQ;
2532 	if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2533 		trace_flags |= TRACE_FLAG_BH_OFF;
2534 
2535 	if (tif_need_resched())
2536 		trace_flags |= TRACE_FLAG_NEED_RESCHED;
2537 	if (test_preempt_need_resched())
2538 		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2539 	if (IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY) && tif_test_bit(TIF_NEED_RESCHED_LAZY))
2540 		trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
2541 	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2542 		(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
2543 }
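
/*
 * Editor's note (sketch of the packing above): the returned trace_ctx
 * word is laid out as
 *
 *   bits  0- 3: preempt_count() & 0xff, clamped to 0xf
 *   bits  4- 7: migration disable depth, clamped to 0xf
 *   bits 16-31: TRACE_FLAG_* bits (NMI/hardirq/softirq/resched state)
 *
 * e.g. a hardirq interrupting a preempt-disabled section would yield
 * something like (TRACE_FLAG_HARDIRQ << 16) | 0x1.
 */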
2544 
2545 struct ring_buffer_event *
2546 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2547 			  int type,
2548 			  unsigned long len,
2549 			  unsigned int trace_ctx)
2550 {
2551 	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2552 }
2553 
2554 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2555 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2556 static int trace_buffered_event_ref;
2557 
2558 /**
2559  * trace_buffered_event_enable - enable buffering events
2560  *
2561  * When events are being filtered, it is quicker to write the event
2562  * data into a temporary buffer if there is a likely chance that it
2563  * will not be committed. Discarding an event from the ring buffer
2564  * is slower than committing one, and much slower than copying a
2565  * buffered event into a commit.
2566  *
2567  * When an event is to be filtered, allocate per-CPU buffers to
2568  * write the event data into; if the event is filtered and discarded
2569  * it is simply dropped, otherwise the entire data is committed in
2570  * one shot.
2571  */
2572 void trace_buffered_event_enable(void)
2573 {
2574 	struct ring_buffer_event *event;
2575 	struct page *page;
2576 	int cpu;
2577 
2578 	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2579 
2580 	if (trace_buffered_event_ref++)
2581 		return;
2582 
2583 	for_each_tracing_cpu(cpu) {
2584 		page = alloc_pages_node(cpu_to_node(cpu),
2585 					GFP_KERNEL | __GFP_NORETRY, 0);
2586 		/* This is just an optimization and can handle failures */
2587 		if (!page) {
2588 			pr_err("Failed to allocate event buffer\n");
2589 			break;
2590 		}
2591 
2592 		event = page_address(page);
2593 		memset(event, 0, sizeof(*event));
2594 
2595 		per_cpu(trace_buffered_event, cpu) = event;
2596 
2597 		preempt_disable();
2598 		if (cpu == smp_processor_id() &&
2599 		    __this_cpu_read(trace_buffered_event) !=
2600 		    per_cpu(trace_buffered_event, cpu))
2601 			WARN_ON_ONCE(1);
2602 		preempt_enable();
2603 	}
2604 }
2605 
2606 static void enable_trace_buffered_event(void *data)
2607 {
2608 	/* Probably not needed, but do it anyway */
2609 	smp_rmb();
2610 	this_cpu_dec(trace_buffered_event_cnt);
2611 }
2612 
2613 static void disable_trace_buffered_event(void *data)
2614 {
2615 	this_cpu_inc(trace_buffered_event_cnt);
2616 }
2617 
2618 /**
2619  * trace_buffered_event_disable - disable buffering events
2620  *
2621  * When a filter is removed, it is faster to not use the buffered
2622  * events, and to commit directly into the ring buffer. Free up
2623  * the temp buffers when there are no more users. This requires
2624  * special synchronization with current events.
2625  */
2626 void trace_buffered_event_disable(void)
2627 {
2628 	int cpu;
2629 
2630 	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2631 
2632 	if (WARN_ON_ONCE(!trace_buffered_event_ref))
2633 		return;
2634 
2635 	if (--trace_buffered_event_ref)
2636 		return;
2637 
2638 	/* For each CPU, set the buffer as used. */
2639 	on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
2640 			 NULL, true);
2641 
2642 	/* Wait for all current users to finish */
2643 	synchronize_rcu();
2644 
2645 	for_each_tracing_cpu(cpu) {
2646 		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2647 		per_cpu(trace_buffered_event, cpu) = NULL;
2648 	}
2649 
2650 	/*
2651 	 * Wait for all CPUs that potentially started checking whether they can
2652 	 * use their event buffer only after the previous synchronize_rcu() call
2653 	 * and still read a valid pointer from trace_buffered_event. It must be
2654 	 * ensured they don't see a cleared trace_buffered_event_cnt, else they
2655 	 * could wrongly decide to use the pointed-to buffer, which is now freed.
2656 	 */
2657 	synchronize_rcu();
2658 
2659 	/* For each CPU, relinquish the buffer */
2660 	on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
2661 			 true);
2662 }
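
/*
 * Editor's sketch (not part of the original file): callers pair these
 * refcounted helpers while holding event_mutex (the WARN_ON_ONCE in
 * each enforces this), roughly:
 *
 *   mutex_lock(&event_mutex);
 *   trace_buffered_event_enable();     // e.g. when soft-disabling
 *   ...
 *   trace_buffered_event_disable();    // when the last user goes away
 *   mutex_unlock(&event_mutex);
 */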
2663 
2664 static struct trace_buffer *temp_buffer;
2665 
2666 struct ring_buffer_event *
2667 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2668 			  struct trace_event_file *trace_file,
2669 			  int type, unsigned long len,
2670 			  unsigned int trace_ctx)
2671 {
2672 	struct ring_buffer_event *entry;
2673 	struct trace_array *tr = trace_file->tr;
2674 	int val;
2675 
2676 	*current_rb = tr->array_buffer.buffer;
2677 
2678 	if (!tr->no_filter_buffering_ref &&
2679 	    (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2680 		preempt_disable_notrace();
2681 		/*
2682 		 * Filtering is on, so try to use the per cpu buffer first.
2683 		 * This buffer will simulate a ring_buffer_event,
2684 		 * where the type_len is zero and the array[0] will
2685 		 * hold the full length.
2686 		 * (see include/linux/ring_buffer.h for details on
2687 		 *  how the ring_buffer_event is structured).
2688 		 *
2689 		 * Using a temp buffer during filtering and copying it
2690 		 * on a matched filter is quicker than writing directly
2691 		 * into the ring buffer and then discarding it when
2692 		 * it doesn't match. That is because the discard
2693 		 * requires several atomic operations to get right.
2694 		 * Copying on match and doing nothing on a failed match
2695 		 * is still quicker than no copy on match, but having
2696 		 * to discard out of the ring buffer on a failed match.
2697 		 */
2698 		if ((entry = __this_cpu_read(trace_buffered_event))) {
2699 			int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2700 
2701 			val = this_cpu_inc_return(trace_buffered_event_cnt);
2702 
2703 			/*
2704 			 * Preemption is disabled, but interrupts and NMIs
2705 			 * can still come in now. If that happens after
2706 			 * the above increment, then it will have to go
2707 			 * back to the old method of allocating the event
2708 			 * on the ring buffer, and if the filter fails, it
2709 			 * will have to call ring_buffer_discard_commit()
2710 			 * to remove it.
2711 			 *
2712 			 * Need to also check the unlikely case that the
2713 			 * length is bigger than the temp buffer size.
2714 			 * If that happens, then the reserve is pretty much
2715 			 * guaranteed to fail, as the ring buffer currently
2716 			 * only allows events less than a page. But that may
2717 			 * change in the future, so let the ring buffer reserve
2718 			 * handle the failure in that case.
2719 			 */
2720 			if (val == 1 && likely(len <= max_len)) {
2721 				trace_event_setup(entry, type, trace_ctx);
2722 				entry->array[0] = len;
2723 				/* Return with preemption disabled */
2724 				return entry;
2725 			}
2726 			this_cpu_dec(trace_buffered_event_cnt);
2727 		}
2728 		/* __trace_buffer_lock_reserve() disables preemption */
2729 		preempt_enable_notrace();
2730 	}
2731 
2732 	entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2733 					    trace_ctx);
2734 	/*
2735 	 * If tracing is off but we have triggers enabled,
2736 	 * we still need to look at the event data. Use the temp_buffer
2737 	 * to store the trace event for the trigger to use. It's recursion
2738 	 * safe and will not be recorded anywhere.
2739 	 */
2740 	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2741 		*current_rb = temp_buffer;
2742 		entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2743 						    trace_ctx);
2744 	}
2745 	return entry;
2746 }
2747 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2748 
2749 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2750 static DEFINE_MUTEX(tracepoint_printk_mutex);
2751 
2752 static void output_printk(struct trace_event_buffer *fbuffer)
2753 {
2754 	struct trace_event_call *event_call;
2755 	struct trace_event_file *file;
2756 	struct trace_event *event;
2757 	unsigned long flags;
2758 	struct trace_iterator *iter = tracepoint_print_iter;
2759 
2760 	/* We should never get here if iter is NULL */
2761 	if (WARN_ON_ONCE(!iter))
2762 		return;
2763 
2764 	event_call = fbuffer->trace_file->event_call;
2765 	if (!event_call || !event_call->event.funcs ||
2766 	    !event_call->event.funcs->trace)
2767 		return;
2768 
2769 	file = fbuffer->trace_file;
2770 	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2771 	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2772 	     !filter_match_preds(file->filter, fbuffer->entry)))
2773 		return;
2774 
2775 	event = &fbuffer->trace_file->event_call->event;
2776 
2777 	raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2778 	trace_seq_init(&iter->seq);
2779 	iter->ent = fbuffer->entry;
2780 	event_call->event.funcs->trace(iter, 0, event);
2781 	trace_seq_putc(&iter->seq, 0);
2782 	printk("%s", iter->seq.buffer);
2783 
2784 	raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2785 }
2786 
2787 int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
2788 			     void *buffer, size_t *lenp,
2789 			     loff_t *ppos)
2790 {
2791 	int save_tracepoint_printk;
2792 	int ret;
2793 
2794 	guard(mutex)(&tracepoint_printk_mutex);
2795 	save_tracepoint_printk = tracepoint_printk;
2796 
2797 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2798 
2799 	/*
2800 	 * This will force exiting early, as tracepoint_printk
2801 	 * is always zero when tracepoint_print_iter is not allocated.
2802 	 */
2803 	if (!tracepoint_print_iter)
2804 		tracepoint_printk = 0;
2805 
2806 	if (save_tracepoint_printk == tracepoint_printk)
2807 		return ret;
2808 
2809 	if (tracepoint_printk)
2810 		static_key_enable(&tracepoint_printk_key.key);
2811 	else
2812 		static_key_disable(&tracepoint_printk_key.key);
2813 
2814 	return ret;
2815 }
2816 
2817 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2818 {
2819 	enum event_trigger_type tt = ETT_NONE;
2820 	struct trace_event_file *file = fbuffer->trace_file;
2821 
2822 	if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2823 			fbuffer->entry, &tt))
2824 		goto discard;
2825 
2826 	if (static_key_false(&tracepoint_printk_key.key))
2827 		output_printk(fbuffer);
2828 
2829 	if (static_branch_unlikely(&trace_event_exports_enabled))
2830 		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2831 
2832 	trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2833 			fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2834 
2835 discard:
2836 	if (tt)
2837 		event_triggers_post_call(file, tt);
2838 
2839 }
2840 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2841 
2842 /*
2843  * Skip 3:
2844  *
2845  *   trace_buffer_unlock_commit_regs()
2846  *   trace_event_buffer_commit()
2847  *   trace_event_raw_event_xxx()
2848  */
2849 # define STACK_SKIP 3
2850 
2851 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2852 				     struct trace_buffer *buffer,
2853 				     struct ring_buffer_event *event,
2854 				     unsigned int trace_ctx,
2855 				     struct pt_regs *regs)
2856 {
2857 	__buffer_unlock_commit(buffer, event);
2858 
2859 	/*
2860 	 * If regs is not set, then skip the wrapper functions counted
2861 	 * by STACK_SKIP. Note, we can still get here via blktrace, the
2862 	 * wakeup tracer and mmiotrace, but it's OK if they lose a
2863 	 * function or two. They are not that meaningful.
2864 	 */
2865 	ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2866 	ftrace_trace_userstack(tr, buffer, trace_ctx);
2867 }
2868 
2869 /*
2870  * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2871  */
2872 void
2873 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2874 				   struct ring_buffer_event *event)
2875 {
2876 	__buffer_unlock_commit(buffer, event);
2877 }
2878 
2879 void
2880 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2881 	       parent_ip, unsigned int trace_ctx)
2882 {
2883 	struct trace_buffer *buffer = tr->array_buffer.buffer;
2884 	struct ring_buffer_event *event;
2885 	struct ftrace_entry *entry;
2886 
2887 	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2888 					    trace_ctx);
2889 	if (!event)
2890 		return;
2891 	entry	= ring_buffer_event_data(event);
2892 	entry->ip			= ip;
2893 	entry->parent_ip		= parent_ip;
2894 
2895 	if (static_branch_unlikely(&trace_function_exports_enabled))
2896 		ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2897 	__buffer_unlock_commit(buffer, event);
2898 }
2899 
2900 #ifdef CONFIG_STACKTRACE
2901 
2902 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2903 #define FTRACE_KSTACK_NESTING	4
2904 
2905 #define FTRACE_KSTACK_ENTRIES	(SZ_4K / FTRACE_KSTACK_NESTING)
2906 
2907 struct ftrace_stack {
2908 	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
2909 };
2910 
2912 struct ftrace_stacks {
2913 	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
2914 };
2915 
2916 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2917 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2918 
2919 static void __ftrace_trace_stack(struct trace_array *tr,
2920 				 struct trace_buffer *buffer,
2921 				 unsigned int trace_ctx,
2922 				 int skip, struct pt_regs *regs)
2923 {
2924 	struct ring_buffer_event *event;
2925 	unsigned int size, nr_entries;
2926 	struct ftrace_stack *fstack;
2927 	struct stack_entry *entry;
2928 	int stackidx;
2929 
2930 	/*
2931 	 * Add one, for this function and the call to stack_trace_save().
2932 	 * If regs is set, then these functions will not be in the way.
2933 	 */
2934 #ifndef CONFIG_UNWINDER_ORC
2935 	if (!regs)
2936 		skip++;
2937 #endif
2938 
2939 	preempt_disable_notrace();
2940 
2941 	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2942 
2943 	/* This should never happen. If it does, yell once and skip */
2944 	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2945 		goto out;
2946 
2947 	/*
2948 	 * The above __this_cpu_inc_return() is 'atomic' CPU local. An
2949 	 * interrupt will either see the value pre-increment or
2950 	 * post-increment. If the interrupt happens pre-increment, it will
2951 	 * have restored the counter when it returns. We just need a
2952 	 * barrier to keep gcc from moving things around.
2953 	 */
2954 	barrier();
2955 
2956 	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2957 	size = ARRAY_SIZE(fstack->calls);
2958 
2959 	if (regs) {
2960 		nr_entries = stack_trace_save_regs(regs, fstack->calls,
2961 						   size, skip);
2962 	} else {
2963 		nr_entries = stack_trace_save(fstack->calls, size, skip);
2964 	}
2965 
2966 #ifdef CONFIG_DYNAMIC_FTRACE
2967 	/* Mark entry of stack trace as trampoline code */
2968 	if (tr->ops && tr->ops->trampoline) {
2969 		unsigned long tramp_start = tr->ops->trampoline;
2970 		unsigned long tramp_end = tramp_start + tr->ops->trampoline_size;
2971 		unsigned long *calls = fstack->calls;
2972 
2973 		for (int i = 0; i < nr_entries; i++) {
2974 			if (calls[i] >= tramp_start && calls[i] < tramp_end)
2975 				calls[i] = FTRACE_TRAMPOLINE_MARKER;
2976 		}
2977 	}
2978 #endif
2979 
2980 	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2981 				    struct_size(entry, caller, nr_entries),
2982 				    trace_ctx);
2983 	if (!event)
2984 		goto out;
2985 	entry = ring_buffer_event_data(event);
2986 
2987 	entry->size = nr_entries;
2988 	memcpy(&entry->caller, fstack->calls,
2989 	       flex_array_size(entry, caller, nr_entries));
2990 
2991 	__buffer_unlock_commit(buffer, event);
2992 
2993  out:
2994 	/* Again, don't let gcc optimize things here */
2995 	barrier();
2996 	__this_cpu_dec(ftrace_stack_reserve);
2997 	preempt_enable_notrace();
2998 
2999 }
3000 
3001 static inline void ftrace_trace_stack(struct trace_array *tr,
3002 				      struct trace_buffer *buffer,
3003 				      unsigned int trace_ctx,
3004 				      int skip, struct pt_regs *regs)
3005 {
3006 	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3007 		return;
3008 
3009 	__ftrace_trace_stack(tr, buffer, trace_ctx, skip, regs);
3010 }
3011 
3012 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3013 		   int skip)
3014 {
3015 	struct trace_buffer *buffer = tr->array_buffer.buffer;
3016 
3017 	if (rcu_is_watching()) {
3018 		__ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL);
3019 		return;
3020 	}
3021 
3022 	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
3023 		return;
3024 
3025 	/*
3026 	 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3027 	 * but if the above rcu_is_watching() failed, then the NMI
3028 	 * triggered someplace critical, and ct_irq_enter() should
3029 	 * not be called from NMI.
3030 	 */
3031 	if (unlikely(in_nmi()))
3032 		return;
3033 
3034 	ct_irq_enter_irqson();
3035 	__ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL);
3036 	ct_irq_exit_irqson();
3037 }
3038 
3039 /**
3040  * trace_dump_stack - record a stack back trace in the trace buffer
3041  * @skip: Number of functions to skip (helper handlers)
3042  */
3043 void trace_dump_stack(int skip)
3044 {
3045 	if (tracing_disabled || tracing_selftest_running)
3046 		return;
3047 
3048 #ifndef CONFIG_UNWINDER_ORC
3049 	/* Skip 1 to skip this function. */
3050 	skip++;
3051 #endif
3052 	__ftrace_trace_stack(printk_trace, printk_trace->array_buffer.buffer,
3053 				tracing_gen_ctx(), skip, NULL);
3054 }
3055 EXPORT_SYMBOL_GPL(trace_dump_stack);
3056 
3057 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3058 static DEFINE_PER_CPU(int, user_stack_count);
3059 
3060 static void
3061 ftrace_trace_userstack(struct trace_array *tr,
3062 		       struct trace_buffer *buffer, unsigned int trace_ctx)
3063 {
3064 	struct ring_buffer_event *event;
3065 	struct userstack_entry *entry;
3066 
3067 	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3068 		return;
3069 
3070 	/*
3071 	 * NMIs cannot handle page faults, even with fixups.
3072 	 * Saving the user stack can (and often does) fault.
3073 	 */
3074 	if (unlikely(in_nmi()))
3075 		return;
3076 
3077 	/*
3078 	 * Prevent recursion, since the user stack tracing may
3079 	 * trigger other kernel events.
3080 	 */
3081 	preempt_disable();
3082 	if (__this_cpu_read(user_stack_count))
3083 		goto out;
3084 
3085 	__this_cpu_inc(user_stack_count);
3086 
3087 	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3088 					    sizeof(*entry), trace_ctx);
3089 	if (!event)
3090 		goto out_drop_count;
3091 	entry	= ring_buffer_event_data(event);
3092 
3093 	entry->tgid		= current->tgid;
3094 	memset(&entry->caller, 0, sizeof(entry->caller));
3095 
3096 	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3097 	__buffer_unlock_commit(buffer, event);
3098 
3099  out_drop_count:
3100 	__this_cpu_dec(user_stack_count);
3101  out:
3102 	preempt_enable();
3103 }
3104 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3105 static void ftrace_trace_userstack(struct trace_array *tr,
3106 				   struct trace_buffer *buffer,
3107 				   unsigned int trace_ctx)
3108 {
3109 }
3110 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3111 
3112 #endif /* CONFIG_STACKTRACE */
3113 
3114 static inline void
3115 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3116 			  unsigned long long delta)
3117 {
3118 	entry->bottom_delta_ts = delta & U32_MAX;
3119 	entry->top_delta_ts = (delta >> 32);
3120 }
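
/*
 * Editor's note: the 64-bit delta is split into two 32-bit fields to
 * keep the entry packed; a reader reconstructs it as (sketch):
 *
 *   u64 delta = ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
 */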
3121 
3122 void trace_last_func_repeats(struct trace_array *tr,
3123 			     struct trace_func_repeats *last_info,
3124 			     unsigned int trace_ctx)
3125 {
3126 	struct trace_buffer *buffer = tr->array_buffer.buffer;
3127 	struct func_repeats_entry *entry;
3128 	struct ring_buffer_event *event;
3129 	u64 delta;
3130 
3131 	event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3132 					    sizeof(*entry), trace_ctx);
3133 	if (!event)
3134 		return;
3135 
3136 	delta = ring_buffer_event_time_stamp(buffer, event) -
3137 		last_info->ts_last_call;
3138 
3139 	entry = ring_buffer_event_data(event);
3140 	entry->ip = last_info->ip;
3141 	entry->parent_ip = last_info->parent_ip;
3142 	entry->count = last_info->count;
3143 	func_repeats_set_delta_ts(entry, delta);
3144 
3145 	__buffer_unlock_commit(buffer, event);
3146 }
3147 
3148 /* created for use with alloc_percpu */
3149 struct trace_buffer_struct {
3150 	int nesting;
3151 	char buffer[4][TRACE_BUF_SIZE];
3152 };
3153 
3154 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3155 
3156 /*
3157  * This allows for lockless recording. If we're nested too deeply, then
3158  * this returns NULL.
3159  */
3160 static char *get_trace_buf(void)
3161 {
3162 	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3163 
3164 	if (!trace_percpu_buffer || buffer->nesting >= 4)
3165 		return NULL;
3166 
3167 	buffer->nesting++;
3168 
3169 	/* Interrupts must see nesting incremented before we use the buffer */
3170 	barrier();
3171 	return &buffer->buffer[buffer->nesting - 1][0];
3172 }
3173 
3174 static void put_trace_buf(void)
3175 {
3176 	/* Don't let the decrement of nesting leak before this */
3177 	barrier();
3178 	this_cpu_dec(trace_percpu_buffer->nesting);
3179 }
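
/*
 * Editor's sketch of the usage pattern (see trace_vbprintk() below):
 *
 *   preempt_disable_notrace();
 *   tbuffer = get_trace_buf();
 *   if (tbuffer) {
 *           ... format up to TRACE_BUF_SIZE bytes into tbuffer ...
 *           put_trace_buf();
 *   }
 *   preempt_enable_notrace();
 *
 * Preemption must stay disabled between get and put so that the
 * per-CPU nesting counter is balanced on the same CPU.
 */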
3180 
3181 static int alloc_percpu_trace_buffer(void)
3182 {
3183 	struct trace_buffer_struct __percpu *buffers;
3184 
3185 	if (trace_percpu_buffer)
3186 		return 0;
3187 
3188 	buffers = alloc_percpu(struct trace_buffer_struct);
3189 	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3190 		return -ENOMEM;
3191 
3192 	trace_percpu_buffer = buffers;
3193 	return 0;
3194 }
3195 
3196 static int buffers_allocated;
3197 
3198 void trace_printk_init_buffers(void)
3199 {
3200 	if (buffers_allocated)
3201 		return;
3202 
3203 	if (alloc_percpu_trace_buffer())
3204 		return;
3205 
3206 	/* trace_printk() is for debug use only. Don't use it in production. */
3207 
3208 	pr_warn("\n");
3209 	pr_warn("**********************************************************\n");
3210 	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3211 	pr_warn("**                                                      **\n");
3212 	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
3213 	pr_warn("**                                                      **\n");
3214 	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
3215 	pr_warn("** unsafe for production use.                           **\n");
3216 	pr_warn("**                                                      **\n");
3217 	pr_warn("** If you see this message and you are not debugging    **\n");
3218 	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
3219 	pr_warn("**                                                      **\n");
3220 	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3221 	pr_warn("**********************************************************\n");
3222 
3223 	/* Expand the buffers to set size */
3224 	tracing_update_buffers(&global_trace);
3225 
3226 	buffers_allocated = 1;
3227 
3228 	/*
3229 	 * trace_printk_init_buffers() can be called by modules.
3230 	 * If that happens, then we need to start cmdline recording
3231 	 * directly here. If the global_trace.buffer is already
3232 	 * allocated here, then this was called by module code.
3233 	 */
3234 	if (global_trace.array_buffer.buffer)
3235 		tracing_start_cmdline_record();
3236 }
3237 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3238 
3239 void trace_printk_start_comm(void)
3240 {
3241 	/* Start tracing comms if trace printk is set */
3242 	if (!buffers_allocated)
3243 		return;
3244 	tracing_start_cmdline_record();
3245 }
3246 
3247 static void trace_printk_start_stop_comm(int enabled)
3248 {
3249 	if (!buffers_allocated)
3250 		return;
3251 
3252 	if (enabled)
3253 		tracing_start_cmdline_record();
3254 	else
3255 		tracing_stop_cmdline_record();
3256 }
3257 
3258 /**
3259  * trace_vbprintk - write binary msg to tracing buffer
3260  * @ip:    The address of the caller
3261  * @fmt:   The string format to write to the buffer
3262  * @args:  Arguments for @fmt
3263  */
3264 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3265 {
3266 	struct ring_buffer_event *event;
3267 	struct trace_buffer *buffer;
3268 	struct trace_array *tr = READ_ONCE(printk_trace);
3269 	struct bprint_entry *entry;
3270 	unsigned int trace_ctx;
3271 	char *tbuffer;
3272 	int len = 0, size;
3273 
3274 	if (!printk_binsafe(tr))
3275 		return trace_vprintk(ip, fmt, args);
3276 
3277 	if (unlikely(tracing_selftest_running || tracing_disabled))
3278 		return 0;
3279 
3280 	/* Don't pollute graph traces with trace_vprintk internals */
3281 	pause_graph_tracing();
3282 
3283 	trace_ctx = tracing_gen_ctx();
3284 	preempt_disable_notrace();
3285 
3286 	tbuffer = get_trace_buf();
3287 	if (!tbuffer) {
3288 		len = 0;
3289 		goto out_nobuffer;
3290 	}
3291 
3292 	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3293 
3294 	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3295 		goto out_put;
3296 
3297 	size = sizeof(*entry) + sizeof(u32) * len;
3298 	buffer = tr->array_buffer.buffer;
3299 	ring_buffer_nest_start(buffer);
3300 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3301 					    trace_ctx);
3302 	if (!event)
3303 		goto out;
3304 	entry = ring_buffer_event_data(event);
3305 	entry->ip			= ip;
3306 	entry->fmt			= fmt;
3307 
3308 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3309 	__buffer_unlock_commit(buffer, event);
3310 	ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3311 
3312 out:
3313 	ring_buffer_nest_end(buffer);
3314 out_put:
3315 	put_trace_buf();
3316 
3317 out_nobuffer:
3318 	preempt_enable_notrace();
3319 	unpause_graph_tracing();
3320 
3321 	return len;
3322 }
3323 EXPORT_SYMBOL_GPL(trace_vbprintk);
3324 
3325 __printf(3, 0)
3326 static int
3327 __trace_array_vprintk(struct trace_buffer *buffer,
3328 		      unsigned long ip, const char *fmt, va_list args)
3329 {
3330 	struct ring_buffer_event *event;
3331 	int len = 0, size;
3332 	struct print_entry *entry;
3333 	unsigned int trace_ctx;
3334 	char *tbuffer;
3335 
3336 	if (tracing_disabled)
3337 		return 0;
3338 
3339 	/* Don't pollute graph traces with trace_vprintk internals */
3340 	pause_graph_tracing();
3341 
3342 	trace_ctx = tracing_gen_ctx();
3343 	preempt_disable_notrace();
3344 
3346 	tbuffer = get_trace_buf();
3347 	if (!tbuffer) {
3348 		len = 0;
3349 		goto out_nobuffer;
3350 	}
3351 
3352 	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3353 
3354 	size = sizeof(*entry) + len + 1;
3355 	ring_buffer_nest_start(buffer);
3356 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3357 					    trace_ctx);
3358 	if (!event)
3359 		goto out;
3360 	entry = ring_buffer_event_data(event);
3361 	entry->ip = ip;
3362 
3363 	memcpy(&entry->buf, tbuffer, len + 1);
3364 	__buffer_unlock_commit(buffer, event);
3365 	ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
3366 
3367 out:
3368 	ring_buffer_nest_end(buffer);
3369 	put_trace_buf();
3370 
3371 out_nobuffer:
3372 	preempt_enable_notrace();
3373 	unpause_graph_tracing();
3374 
3375 	return len;
3376 }
3377 
3378 __printf(3, 0)
3379 int trace_array_vprintk(struct trace_array *tr,
3380 			unsigned long ip, const char *fmt, va_list args)
3381 {
3382 	if (tracing_selftest_running && tr == &global_trace)
3383 		return 0;
3384 
3385 	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3386 }
3387 
3388 /**
3389  * trace_array_printk - Print a message to a specific instance
3390  * @tr: The instance trace_array descriptor
3391  * @ip: The instruction pointer that this is called from.
3392  * @fmt: The format to print (printf format)
3393  *
3394  * If a subsystem sets up its own instance, they have the right to
3395  * printk strings into their tracing instance buffer using this
3396  * function. Note, this function will not write into the top level
3397  * buffer (use trace_printk() for that), as writing into the top level
3398  * buffer should only have events that can be individually disabled.
3399  * trace_printk() is only used for debugging a kernel, and should not
3400  * be ever incorporated in normal use.
3401  *
3402  * trace_array_printk() can be used, as it will not add noise to the
3403  * top level tracing buffer.
3404  *
3405  * Note, trace_array_init_printk() must be called on @tr before this
3406  * can be used.
3407  */
3408 __printf(3, 0)
3409 int trace_array_printk(struct trace_array *tr,
3410 		       unsigned long ip, const char *fmt, ...)
3411 {
3412 	int ret;
3413 	va_list ap;
3414 
3415 	if (!tr)
3416 		return -ENOENT;
3417 
3418 	/* This is only allowed for created instances */
3419 	if (tr == &global_trace)
3420 		return 0;
3421 
3422 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3423 		return 0;
3424 
3425 	va_start(ap, fmt);
3426 	ret = trace_array_vprintk(tr, ip, fmt, ap);
3427 	va_end(ap);
3428 	return ret;
3429 }
3430 EXPORT_SYMBOL_GPL(trace_array_printk);
3431 
3432 /**
3433  * trace_array_init_printk - Initialize buffers for trace_array_printk()
3434  * @tr: The trace array to initialize the buffers for
3435  *
3436  * As trace_array_printk() only writes into instances, they are OK to
3437  * have in the kernel (unlike trace_printk()). This needs to be called
3438  * before trace_array_printk() can be used on a trace_array.
3439  */
3440 int trace_array_init_printk(struct trace_array *tr)
3441 {
3442 	if (!tr)
3443 		return -ENOENT;
3444 
3445 	/* This is only allowed for created instances */
3446 	if (tr == &global_trace)
3447 		return -EINVAL;
3448 
3449 	return alloc_percpu_trace_buffer();
3450 }
3451 EXPORT_SYMBOL_GPL(trace_array_init_printk);
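
/*
 * Editor's sketch (hypothetical subsystem code, not part of the
 * original file):
 *
 *   struct trace_array *tr;
 *
 *   tr = trace_array_get_by_name("my_subsys", NULL);
 *   if (tr && !trace_array_init_printk(tr))
 *           trace_array_printk(tr, _THIS_IP_, "hello %d\n", 42);
 */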
3452 
3453 __printf(3, 4)
3454 int trace_array_printk_buf(struct trace_buffer *buffer,
3455 			   unsigned long ip, const char *fmt, ...)
3456 {
3457 	int ret;
3458 	va_list ap;
3459 
3460 	if (!(printk_trace->trace_flags & TRACE_ITER_PRINTK))
3461 		return 0;
3462 
3463 	va_start(ap, fmt);
3464 	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3465 	va_end(ap);
3466 	return ret;
3467 }
3468 
3469 __printf(2, 0)
3470 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3471 {
3472 	return trace_array_vprintk(printk_trace, ip, fmt, args);
3473 }
3474 EXPORT_SYMBOL_GPL(trace_vprintk);
3475 
3476 static void trace_iterator_increment(struct trace_iterator *iter)
3477 {
3478 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3479 
3480 	iter->idx++;
3481 	if (buf_iter)
3482 		ring_buffer_iter_advance(buf_iter);
3483 }
3484 
3485 static struct trace_entry *
3486 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3487 		unsigned long *lost_events)
3488 {
3489 	struct ring_buffer_event *event;
3490 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3491 
3492 	if (buf_iter) {
3493 		event = ring_buffer_iter_peek(buf_iter, ts);
3494 		if (lost_events)
3495 			*lost_events = ring_buffer_iter_dropped(buf_iter) ?
3496 				(unsigned long)-1 : 0;
3497 	} else {
3498 		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3499 					 lost_events);
3500 	}
3501 
3502 	if (event) {
3503 		iter->ent_size = ring_buffer_event_length(event);
3504 		return ring_buffer_event_data(event);
3505 	}
3506 	iter->ent_size = 0;
3507 	return NULL;
3508 }
3509 
3510 static struct trace_entry *
3511 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3512 		  unsigned long *missing_events, u64 *ent_ts)
3513 {
3514 	struct trace_buffer *buffer = iter->array_buffer->buffer;
3515 	struct trace_entry *ent, *next = NULL;
3516 	unsigned long lost_events = 0, next_lost = 0;
3517 	int cpu_file = iter->cpu_file;
3518 	u64 next_ts = 0, ts;
3519 	int next_cpu = -1;
3520 	int next_size = 0;
3521 	int cpu;
3522 
3523 	/*
3524 	 * If we are in a per_cpu trace file, don't bother iterating over
3525 	 * all CPUs; just peek at that CPU directly.
3526 	 */
3527 	if (cpu_file > RING_BUFFER_ALL_CPUS) {
3528 		if (ring_buffer_empty_cpu(buffer, cpu_file))
3529 			return NULL;
3530 		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3531 		if (ent_cpu)
3532 			*ent_cpu = cpu_file;
3533 
3534 		return ent;
3535 	}
3536 
3537 	for_each_tracing_cpu(cpu) {
3538 
3539 		if (ring_buffer_empty_cpu(buffer, cpu))
3540 			continue;
3541 
3542 		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3543 
3544 		/*
3545 		 * Pick the entry with the smallest timestamp:
3546 		 */
3547 		if (ent && (!next || ts < next_ts)) {
3548 			next = ent;
3549 			next_cpu = cpu;
3550 			next_ts = ts;
3551 			next_lost = lost_events;
3552 			next_size = iter->ent_size;
3553 		}
3554 	}
3555 
3556 	iter->ent_size = next_size;
3557 
3558 	if (ent_cpu)
3559 		*ent_cpu = next_cpu;
3560 
3561 	if (ent_ts)
3562 		*ent_ts = next_ts;
3563 
3564 	if (missing_events)
3565 		*missing_events = next_lost;
3566 
3567 	return next;
3568 }
3569 
3570 #define STATIC_FMT_BUF_SIZE	128
3571 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3572 
3573 char *trace_iter_expand_format(struct trace_iterator *iter)
3574 {
3575 	char *tmp;
3576 
3577 	/*
3578 	 * iter->tr is NULL when used with tp_printk, which makes
3579 	 * this get called where it is not safe to call krealloc().
3580 	 */
3581 	if (!iter->tr || iter->fmt == static_fmt_buf)
3582 		return NULL;
3583 
3584 	tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3585 		       GFP_KERNEL);
3586 	if (tmp) {
3587 		iter->fmt_size += STATIC_FMT_BUF_SIZE;
3588 		iter->fmt = tmp;
3589 	}
3590 
3591 	return tmp;
3592 }
3593 
3594 /* Returns true if the string is safe to dereference from an event */
3595 static bool trace_safe_str(struct trace_iterator *iter, const char *str)
3596 {
3597 	unsigned long addr = (unsigned long)str;
3598 	struct trace_event *trace_event;
3599 	struct trace_event_call *event;
3600 
3601 	/* OK if part of the event data */
3602 	if ((addr >= (unsigned long)iter->ent) &&
3603 	    (addr < (unsigned long)iter->ent + iter->ent_size))
3604 		return true;
3605 
3606 	/* OK if part of the temp seq buffer */
3607 	if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3608 	    (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
3609 		return true;
3610 
3611 	/* Core rodata can not be freed */
3612 	if (is_kernel_rodata(addr))
3613 		return true;
3614 
3615 	if (trace_is_tracepoint_string(str))
3616 		return true;
3617 
3618 	/*
3619 	 * Now this could be a module event, referencing core module
3620 	 * data, which is OK.
3621 	 */
3622 	if (!iter->ent)
3623 		return false;
3624 
3625 	trace_event = ftrace_find_event(iter->ent->type);
3626 	if (!trace_event)
3627 		return false;
3628 
3629 	event = container_of(trace_event, struct trace_event_call, event);
3630 	if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3631 		return false;
3632 
3633 	/* Would rather have rodata, but this will suffice */
3634 	if (within_module_core(addr, event->module))
3635 		return true;
3636 
3637 	return false;
3638 }
3639 
3640 /**
3641  * ignore_event - Check dereferenced fields while writing to the seq buffer
3642  * @iter: The iterator that holds the seq buffer and the event being printed
3643  *
3644  * At boot up, test_event_printk() will flag any event that dereferences
3645  * a string with "%s" that does not exist in the ring buffer. It may still
3646  * be valid, as the string may point to a static string in the kernel
3647  * rodata that never gets freed. But if the string pointer is pointing
3648  * to something that was allocated, there's a chance that it can be freed
3649  * by the time the user reads the trace. This would cause a bad memory
3650  * access by the kernel and possibly crash the system.
3651  *
3652  * This function will check if the event has any fields flagged as needing
3653  * to be checked at runtime and perform those checks.
3654  *
3655  * If it is found that a field is unsafe, it will write into the @iter->seq
3656  * a message stating what was found to be unsafe.
3657  *
3658  * @return: true if the event is unsafe and should be ignored,
3659  *          false otherwise.
3660  */
3661 bool ignore_event(struct trace_iterator *iter)
3662 {
3663 	struct ftrace_event_field *field;
3664 	struct trace_event *trace_event;
3665 	struct trace_event_call *event;
3666 	struct list_head *head;
3667 	struct trace_seq *seq;
3668 	const void *ptr;
3669 
3670 	trace_event = ftrace_find_event(iter->ent->type);
3671 
3672 	seq = &iter->seq;
3673 
3674 	if (!trace_event) {
3675 		trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type);
3676 		return true;
3677 	}
3678 
3679 	event = container_of(trace_event, struct trace_event_call, event);
3680 	if (!(event->flags & TRACE_EVENT_FL_TEST_STR))
3681 		return false;
3682 
3683 	head = trace_get_fields(event);
3684 	if (!head) {
3685 		trace_seq_printf(seq, "FIELDS FOR EVENT '%s' NOT FOUND?\n",
3686 				 trace_event_name(event));
3687 		return true;
3688 	}
3689 
3690 	/* Offsets are from the iter->ent that points to the raw event */
3691 	ptr = iter->ent;
3692 
3693 	list_for_each_entry(field, head, link) {
3694 		const char *str;
3695 		bool good;
3696 
3697 		if (!field->needs_test)
3698 			continue;
3699 
3700 		str = *(const char **)(ptr + field->offset);
3701 
3702 		good = trace_safe_str(iter, str);
3703 
3704 		/*
3705 		 * If you hit this warning, it is likely that the
3706 		 * trace event in question used %s on a string that
3707 		 * was saved at the time of the event, but may not be
3708 		 * around when the trace is read. Use __string(),
3709 		 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3710 		 * instead. See samples/trace_events/trace-events-sample.h
3711 		 * for reference.
3712 		 */
3713 		if (WARN_ONCE(!good, "event '%s' has unsafe pointer field '%s'",
3714 			      trace_event_name(event), field->name)) {
3715 			trace_seq_printf(seq, "EVENT %s: HAS UNSAFE POINTER FIELD '%s'\n",
3716 					 trace_event_name(event), field->name);
3717 			return true;
3718 		}
3719 	}
3720 	return false;
3721 }
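
/*
 * Editor's sketch of the safe pattern referred to above (see
 * samples/trace_events/trace-events-sample.h): copy the string into
 * the event itself instead of recording a pointer to it:
 *
 *   TRACE_EVENT(my_event,
 *           TP_PROTO(const char *name),
 *           TP_ARGS(name),
 *           TP_STRUCT__entry(
 *                   __string(name, name)
 *           ),
 *           TP_fast_assign(
 *                   __assign_str(name);
 *           ),
 *           TP_printk("name=%s", __get_str(name))
 *   );
 */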
3722 
3723 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3724 {
3725 	const char *p, *new_fmt;
3726 	char *q;
3727 
3728 	if (WARN_ON_ONCE(!fmt))
3729 		return fmt;
3730 
3731 	if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3732 		return fmt;
3733 
3734 	p = fmt;
3735 	new_fmt = q = iter->fmt;
3736 	while (*p) {
3737 		if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3738 			if (!trace_iter_expand_format(iter))
3739 				return fmt;
3740 
3741 			q += iter->fmt - new_fmt;
3742 			new_fmt = iter->fmt;
3743 		}
3744 
3745 		*q++ = *p++;
3746 
3747 		/* Replace %p with %px */
3748 		if (p[-1] == '%') {
3749 			if (p[0] == '%') {
3750 				*q++ = *p++;
3751 			} else if (p[0] == 'p' && !isalnum(p[1])) {
3752 				*q++ = *p++;
3753 				*q++ = 'x';
3754 			}
3755 		}
3756 	}
3757 	*q = '\0';
3758 
3759 	return new_fmt;
3760 }
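
/*
 * Illustrative example of the rewrite above: with TRACE_ITER_HASH_PTR
 * clear, a format string such as
 *
 *	"ptr=%p pct=100%% dentry=%pd"
 *
 * is copied into iter->fmt as
 *
 *	"ptr=%px pct=100%% dentry=%pd"
 *
 * Only a bare "%p" is expanded to "%px". A "%%" escape is stepped over,
 * and extended pointer formats such as "%pd" (an alphanumeric follows
 * the 'p') are left untouched.
 */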
3761 
3762 #define STATIC_TEMP_BUF_SIZE	128
3763 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3764 
3765 /* Find the next real entry, without updating the iterator itself */
3766 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3767 					  int *ent_cpu, u64 *ent_ts)
3768 {
3769 	/* __find_next_entry will reset ent_size */
3770 	int ent_size = iter->ent_size;
3771 	struct trace_entry *entry;
3772 
3773 	/*
3774 	 * If called from ftrace_dump(), then the iter->temp buffer
3775 	 * will be the static_temp_buf and not created from kmalloc.
3776 	 * If the entry size is greater than the buffer, we cannot
3777 	 * save it. Just return NULL in that case. This is only
3778 	 * used to add markers when two consecutive events' time
3779 	 * stamps have a large delta. See trace_print_lat_context().
3780 	 */
3781 	if (iter->temp == static_temp_buf &&
3782 	    STATIC_TEMP_BUF_SIZE < ent_size)
3783 		return NULL;
3784 
3785 	/*
3786 	 * The __find_next_entry() may call peek_next_entry(), which may
3787 	 * call ring_buffer_peek() that may make the contents of iter->ent
3788 	 * undefined. Need to copy iter->ent now.
3789 	 */
3790 	if (iter->ent && iter->ent != iter->temp) {
3791 		if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3792 		    !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3793 			void *temp;
3794 			temp = kmalloc(iter->ent_size, GFP_KERNEL);
3795 			if (!temp)
3796 				return NULL;
3797 			kfree(iter->temp);
3798 			iter->temp = temp;
3799 			iter->temp_size = iter->ent_size;
3800 		}
3801 		memcpy(iter->temp, iter->ent, iter->ent_size);
3802 		iter->ent = iter->temp;
3803 	}
3804 	entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3805 	/* Put back the original ent_size */
3806 	iter->ent_size = ent_size;
3807 
3808 	return entry;
3809 }
3810 
3811 /* Find the next real entry, and increment the iterator to the next entry */
3812 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3813 {
3814 	iter->ent = __find_next_entry(iter, &iter->cpu,
3815 				      &iter->lost_events, &iter->ts);
3816 
3817 	if (iter->ent)
3818 		trace_iterator_increment(iter);
3819 
3820 	return iter->ent ? iter : NULL;
3821 }
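
/*
 * A minimal sketch of how the helpers above are typically used by the
 * readers in this file (not a public API contract):
 *
 *	while (trace_find_next_entry_inc(iter)) {
 *		ret = print_trace_line(iter);
 *		... flush iter->seq, handling TRACE_TYPE_PARTIAL_LINE ...
 *	}
 *
 * trace_find_next_entry() is the non-consuming variant: it peeks at the
 * next entry without moving the iterator forward.
 */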
3822 
3823 static void trace_consume(struct trace_iterator *iter)
3824 {
3825 	ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3826 			    &iter->lost_events);
3827 }
3828 
3829 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3830 {
3831 	struct trace_iterator *iter = m->private;
3832 	int i = (int)*pos;
3833 	void *ent;
3834 
3835 	WARN_ON_ONCE(iter->leftover);
3836 
3837 	(*pos)++;
3838 
3839 	/* can't go backwards */
3840 	if (iter->idx > i)
3841 		return NULL;
3842 
3843 	if (iter->idx < 0)
3844 		ent = trace_find_next_entry_inc(iter);
3845 	else
3846 		ent = iter;
3847 
3848 	while (ent && iter->idx < i)
3849 		ent = trace_find_next_entry_inc(iter);
3850 
3851 	iter->pos = *pos;
3852 
3853 	return ent;
3854 }
3855 
3856 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3857 {
3858 	struct ring_buffer_iter *buf_iter;
3859 	unsigned long entries = 0;
3860 	u64 ts;
3861 
3862 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3863 
3864 	buf_iter = trace_buffer_iter(iter, cpu);
3865 	if (!buf_iter)
3866 		return;
3867 
3868 	ring_buffer_iter_reset(buf_iter);
3869 
3870 	/*
3871 	 * With the max latency tracers, it is possible that a reset
3872 	 * never took place on a CPU. This is evident when a timestamp
3873 	 * is before the start of the buffer.
3874 	 */
3875 	while (ring_buffer_iter_peek(buf_iter, &ts)) {
3876 		if (ts >= iter->array_buffer->time_start)
3877 			break;
3878 		entries++;
3879 		ring_buffer_iter_advance(buf_iter);
3880 		/* This could be a big loop */
3881 		cond_resched();
3882 	}
3883 
3884 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3885 }
3886 
3887 /*
3888  * The current tracer is copied into the iterator to avoid
3889  * holding a global lock across the whole read.
3890  */
3891 static void *s_start(struct seq_file *m, loff_t *pos)
3892 {
3893 	struct trace_iterator *iter = m->private;
3894 	struct trace_array *tr = iter->tr;
3895 	int cpu_file = iter->cpu_file;
3896 	void *p = NULL;
3897 	loff_t l = 0;
3898 	int cpu;
3899 
3900 	mutex_lock(&trace_types_lock);
3901 	if (unlikely(tr->current_trace != iter->trace)) {
3902 		/* Close iter->trace before switching to the new current tracer */
3903 		if (iter->trace->close)
3904 			iter->trace->close(iter);
3905 		iter->trace = tr->current_trace;
3906 		/* Reopen the new current tracer */
3907 		if (iter->trace->open)
3908 			iter->trace->open(iter);
3909 	}
3910 	mutex_unlock(&trace_types_lock);
3911 
3912 #ifdef CONFIG_TRACER_MAX_TRACE
3913 	if (iter->snapshot && iter->trace->use_max_tr)
3914 		return ERR_PTR(-EBUSY);
3915 #endif
3916 
3917 	if (*pos != iter->pos) {
3918 		iter->ent = NULL;
3919 		iter->cpu = 0;
3920 		iter->idx = -1;
3921 
3922 		if (cpu_file == RING_BUFFER_ALL_CPUS) {
3923 			for_each_tracing_cpu(cpu)
3924 				tracing_iter_reset(iter, cpu);
3925 		} else
3926 			tracing_iter_reset(iter, cpu_file);
3927 
3928 		iter->leftover = 0;
3929 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3930 			;
3931 
3932 	} else {
3933 		/*
3934 		 * If we overflowed the seq_file before, then we want
3935 		 * to just reuse the trace_seq buffer again.
3936 		 */
3937 		if (iter->leftover)
3938 			p = iter;
3939 		else {
3940 			l = *pos - 1;
3941 			p = s_next(m, p, &l);
3942 		}
3943 	}
3944 
3945 	trace_event_read_lock();
3946 	trace_access_lock(cpu_file);
3947 	return p;
3948 }
3949 
3950 static void s_stop(struct seq_file *m, void *p)
3951 {
3952 	struct trace_iterator *iter = m->private;
3953 
3954 #ifdef CONFIG_TRACER_MAX_TRACE
3955 	if (iter->snapshot && iter->trace->use_max_tr)
3956 		return;
3957 #endif
3958 
3959 	trace_access_unlock(iter->cpu_file);
3960 	trace_event_read_unlock();
3961 }
3962 
3963 static void
3964 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
3965 		      unsigned long *entries, int cpu)
3966 {
3967 	unsigned long count;
3968 
3969 	count = ring_buffer_entries_cpu(buf->buffer, cpu);
3970 	/*
3971 	 * If this buffer has skipped entries, then we hold all
3972 	 * entries for the trace and we need to ignore the
3973 	 * ones before the time stamp.
3974 	 */
3975 	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3976 		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3977 		/* total is the same as the entries */
3978 		*total = count;
3979 	} else
3980 		*total = count +
3981 			ring_buffer_overrun_cpu(buf->buffer, cpu);
3982 	*entries = count;
3983 }
3984 
3985 static void
3986 get_total_entries(struct array_buffer *buf,
3987 		  unsigned long *total, unsigned long *entries)
3988 {
3989 	unsigned long t, e;
3990 	int cpu;
3991 
3992 	*total = 0;
3993 	*entries = 0;
3994 
3995 	for_each_tracing_cpu(cpu) {
3996 		get_total_entries_cpu(buf, &t, &e, cpu);
3997 		*total += t;
3998 		*entries += e;
3999 	}
4000 }
4001 
4002 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4003 {
4004 	unsigned long total, entries;
4005 
4006 	if (!tr)
4007 		tr = &global_trace;
4008 
4009 	get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4010 
4011 	return entries;
4012 }
4013 
4014 unsigned long trace_total_entries(struct trace_array *tr)
4015 {
4016 	unsigned long total, entries;
4017 
4018 	if (!tr)
4019 		tr = &global_trace;
4020 
4021 	get_total_entries(&tr->array_buffer, &total, &entries);
4022 
4023 	return entries;
4024 }
4025 
4026 static void print_lat_help_header(struct seq_file *m)
4027 {
4028 	seq_puts(m, "#                    _------=> CPU#            \n"
4029 		    "#                   / _-----=> irqs-off/BH-disabled\n"
4030 		    "#                  | / _----=> need-resched    \n"
4031 		    "#                  || / _---=> hardirq/softirq \n"
4032 		    "#                  ||| / _--=> preempt-depth   \n"
4033 		    "#                  |||| / _-=> migrate-disable \n"
4034 		    "#                  ||||| /     delay           \n"
4035 		    "#  cmd     pid     |||||| time  |   caller     \n"
4036 		    "#     \\   /        ||||||  \\    |    /       \n");
4037 }
4038 
4039 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4040 {
4041 	unsigned long total;
4042 	unsigned long entries;
4043 
4044 	get_total_entries(buf, &total, &entries);
4045 	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
4046 		   entries, total, num_online_cpus());
4047 	seq_puts(m, "#\n");
4048 }
4049 
4050 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4051 				   unsigned int flags)
4052 {
4053 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
4054 
4055 	print_event_info(buf, m);
4056 
4057 	seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
4058 	seq_printf(m, "#              | |      %s   |         |         |\n",      tgid ? "     |    " : "");
4059 }
4060 
4061 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4062 				       unsigned int flags)
4063 {
4064 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
4065 	static const char space[] = "            ";
4066 	int prec = tgid ? 12 : 2;
4067 
4068 	print_event_info(buf, m);
4069 
4070 	seq_printf(m, "#                            %.*s  _-----=> irqs-off/BH-disabled\n", prec, space);
4071 	seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
4072 	seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
4073 	seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
4074 	seq_printf(m, "#                            %.*s||| / _-=> migrate-disable\n", prec, space);
4075 	seq_printf(m, "#                            %.*s|||| /     delay\n", prec, space);
4076 	seq_printf(m, "#           TASK-PID  %.*s CPU#  |||||  TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
4077 	seq_printf(m, "#              | |    %.*s   |   |||||     |         |\n", prec, "       |    ");
4078 }
4079 
4080 void
4081 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4082 {
4083 	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4084 	struct array_buffer *buf = iter->array_buffer;
4085 	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4086 	struct tracer *type = iter->trace;
4087 	unsigned long entries;
4088 	unsigned long total;
4089 	const char *name = type->name;
4090 
4091 	get_total_entries(buf, &total, &entries);
4092 
4093 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4094 		   name, init_utsname()->release);
4095 	seq_puts(m, "# -----------------------------------"
4096 		 "---------------------------------\n");
4097 	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4098 		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4099 		   nsecs_to_usecs(data->saved_latency),
4100 		   entries,
4101 		   total,
4102 		   buf->cpu,
4103 		   preempt_model_none()      ? "server" :
4104 		   preempt_model_voluntary() ? "desktop" :
4105 		   preempt_model_full()      ? "preempt" :
4106 		   preempt_model_rt()        ? "preempt_rt" :
4107 		   "unknown",
4108 		   /* These are reserved for later use */
4109 		   0, 0, 0, 0);
4110 #ifdef CONFIG_SMP
4111 	seq_printf(m, " #P:%d)\n", num_online_cpus());
4112 #else
4113 	seq_puts(m, ")\n");
4114 #endif
4115 	seq_puts(m, "#    -----------------\n");
4116 	seq_printf(m, "#    | task: %.16s-%d "
4117 		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4118 		   data->comm, data->pid,
4119 		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4120 		   data->policy, data->rt_priority);
4121 	seq_puts(m, "#    -----------------\n");
4122 
4123 	if (data->critical_start) {
4124 		seq_puts(m, "#  => started at: ");
4125 		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4126 		trace_print_seq(m, &iter->seq);
4127 		seq_puts(m, "\n#  => ended at:   ");
4128 		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4129 		trace_print_seq(m, &iter->seq);
4130 		seq_puts(m, "\n#\n");
4131 	}
4132 
4133 	seq_puts(m, "#\n");
4134 }
4135 
4136 static void test_cpu_buff_start(struct trace_iterator *iter)
4137 {
4138 	struct trace_seq *s = &iter->seq;
4139 	struct trace_array *tr = iter->tr;
4140 
4141 	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4142 		return;
4143 
4144 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4145 		return;
4146 
4147 	if (cpumask_available(iter->started) &&
4148 	    cpumask_test_cpu(iter->cpu, iter->started))
4149 		return;
4150 
4151 	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4152 		return;
4153 
4154 	if (cpumask_available(iter->started))
4155 		cpumask_set_cpu(iter->cpu, iter->started);
4156 
4157 	/* Don't print the started CPU buffer message for the first entry of the trace */
4158 	if (iter->idx > 1)
4159 		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4160 				iter->cpu);
4161 }
4162 
4163 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4164 {
4165 	struct trace_array *tr = iter->tr;
4166 	struct trace_seq *s = &iter->seq;
4167 	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4168 	struct trace_entry *entry;
4169 	struct trace_event *event;
4170 
4171 	entry = iter->ent;
4172 
4173 	test_cpu_buff_start(iter);
4174 
4175 	event = ftrace_find_event(entry->type);
4176 
4177 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4178 		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4179 			trace_print_lat_context(iter);
4180 		else
4181 			trace_print_context(iter);
4182 	}
4183 
4184 	if (trace_seq_has_overflowed(s))
4185 		return TRACE_TYPE_PARTIAL_LINE;
4186 
4187 	if (event) {
4188 		if (tr->trace_flags & TRACE_ITER_FIELDS)
4189 			return print_event_fields(iter, event);
4190 		/*
4191 		 * For TRACE_EVENT() events, the print_fmt is not
4192 		 * safe to use if the array has delta offsets.
4193 		 * Force printing via the fields.
4194 		 */
4195 		if ((tr->text_delta || tr->data_delta) &&
4196 		    event->type > __TRACE_LAST_TYPE)
4197 			return print_event_fields(iter, event);
4198 
4199 		return event->funcs->trace(iter, sym_flags, event);
4200 	}
4201 
4202 	trace_seq_printf(s, "Unknown type %d\n", entry->type);
4203 
4204 	return trace_handle_return(s);
4205 }
4206 
4207 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4208 {
4209 	struct trace_array *tr = iter->tr;
4210 	struct trace_seq *s = &iter->seq;
4211 	struct trace_entry *entry;
4212 	struct trace_event *event;
4213 
4214 	entry = iter->ent;
4215 
4216 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4217 		trace_seq_printf(s, "%d %d %llu ",
4218 				 entry->pid, iter->cpu, iter->ts);
4219 
4220 	if (trace_seq_has_overflowed(s))
4221 		return TRACE_TYPE_PARTIAL_LINE;
4222 
4223 	event = ftrace_find_event(entry->type);
4224 	if (event)
4225 		return event->funcs->raw(iter, 0, event);
4226 
4227 	trace_seq_printf(s, "%d ?\n", entry->type);
4228 
4229 	return trace_handle_return(s);
4230 }
4231 
4232 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4233 {
4234 	struct trace_array *tr = iter->tr;
4235 	struct trace_seq *s = &iter->seq;
4236 	unsigned char newline = '\n';
4237 	struct trace_entry *entry;
4238 	struct trace_event *event;
4239 
4240 	entry = iter->ent;
4241 
4242 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4243 		SEQ_PUT_HEX_FIELD(s, entry->pid);
4244 		SEQ_PUT_HEX_FIELD(s, iter->cpu);
4245 		SEQ_PUT_HEX_FIELD(s, iter->ts);
4246 		if (trace_seq_has_overflowed(s))
4247 			return TRACE_TYPE_PARTIAL_LINE;
4248 	}
4249 
4250 	event = ftrace_find_event(entry->type);
4251 	if (event) {
4252 		enum print_line_t ret = event->funcs->hex(iter, 0, event);
4253 		if (ret != TRACE_TYPE_HANDLED)
4254 			return ret;
4255 	}
4256 
4257 	SEQ_PUT_FIELD(s, newline);
4258 
4259 	return trace_handle_return(s);
4260 }
4261 
4262 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4263 {
4264 	struct trace_array *tr = iter->tr;
4265 	struct trace_seq *s = &iter->seq;
4266 	struct trace_entry *entry;
4267 	struct trace_event *event;
4268 
4269 	entry = iter->ent;
4270 
4271 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4272 		SEQ_PUT_FIELD(s, entry->pid);
4273 		SEQ_PUT_FIELD(s, iter->cpu);
4274 		SEQ_PUT_FIELD(s, iter->ts);
4275 		if (trace_seq_has_overflowed(s))
4276 			return TRACE_TYPE_PARTIAL_LINE;
4277 	}
4278 
4279 	event = ftrace_find_event(entry->type);
4280 	return event ? event->funcs->binary(iter, 0, event) :
4281 		TRACE_TYPE_HANDLED;
4282 }
4283 
4284 int trace_empty(struct trace_iterator *iter)
4285 {
4286 	struct ring_buffer_iter *buf_iter;
4287 	int cpu;
4288 
4289 	/* If we are looking at one CPU buffer, only check that one */
4290 	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4291 		cpu = iter->cpu_file;
4292 		buf_iter = trace_buffer_iter(iter, cpu);
4293 		if (buf_iter) {
4294 			if (!ring_buffer_iter_empty(buf_iter))
4295 				return 0;
4296 		} else {
4297 			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4298 				return 0;
4299 		}
4300 		return 1;
4301 	}
4302 
4303 	for_each_tracing_cpu(cpu) {
4304 		buf_iter = trace_buffer_iter(iter, cpu);
4305 		if (buf_iter) {
4306 			if (!ring_buffer_iter_empty(buf_iter))
4307 				return 0;
4308 		} else {
4309 			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4310 				return 0;
4311 		}
4312 	}
4313 
4314 	return 1;
4315 }
4316 
4317 /*  Called with trace_event_read_lock() held. */
4318 enum print_line_t print_trace_line(struct trace_iterator *iter)
4319 {
4320 	struct trace_array *tr = iter->tr;
4321 	unsigned long trace_flags = tr->trace_flags;
4322 	enum print_line_t ret;
4323 
4324 	if (iter->lost_events) {
4325 		if (iter->lost_events == (unsigned long)-1)
4326 			trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4327 					 iter->cpu);
4328 		else
4329 			trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4330 					 iter->cpu, iter->lost_events);
4331 		if (trace_seq_has_overflowed(&iter->seq))
4332 			return TRACE_TYPE_PARTIAL_LINE;
4333 	}
4334 
4335 	if (iter->trace && iter->trace->print_line) {
4336 		ret = iter->trace->print_line(iter);
4337 		if (ret != TRACE_TYPE_UNHANDLED)
4338 			return ret;
4339 	}
4340 
4341 	if (iter->ent->type == TRACE_BPUTS &&
4342 			trace_flags & TRACE_ITER_PRINTK &&
4343 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4344 		return trace_print_bputs_msg_only(iter);
4345 
4346 	if (iter->ent->type == TRACE_BPRINT &&
4347 			trace_flags & TRACE_ITER_PRINTK &&
4348 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4349 		return trace_print_bprintk_msg_only(iter);
4350 
4351 	if (iter->ent->type == TRACE_PRINT &&
4352 			trace_flags & TRACE_ITER_PRINTK &&
4353 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4354 		return trace_print_printk_msg_only(iter);
4355 
4356 	if (trace_flags & TRACE_ITER_BIN)
4357 		return print_bin_fmt(iter);
4358 
4359 	if (trace_flags & TRACE_ITER_HEX)
4360 		return print_hex_fmt(iter);
4361 
4362 	if (trace_flags & TRACE_ITER_RAW)
4363 		return print_raw_fmt(iter);
4364 
4365 	return print_trace_fmt(iter);
4366 }
4367 
4368 void trace_latency_header(struct seq_file *m)
4369 {
4370 	struct trace_iterator *iter = m->private;
4371 	struct trace_array *tr = iter->tr;
4372 
4373 	/* print nothing if the buffers are empty */
4374 	if (trace_empty(iter))
4375 		return;
4376 
4377 	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4378 		print_trace_header(m, iter);
4379 
4380 	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4381 		print_lat_help_header(m);
4382 }
4383 
4384 void trace_default_header(struct seq_file *m)
4385 {
4386 	struct trace_iterator *iter = m->private;
4387 	struct trace_array *tr = iter->tr;
4388 	unsigned long trace_flags = tr->trace_flags;
4389 
4390 	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4391 		return;
4392 
4393 	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4394 		/* print nothing if the buffers are empty */
4395 		if (trace_empty(iter))
4396 			return;
4397 		print_trace_header(m, iter);
4398 		if (!(trace_flags & TRACE_ITER_VERBOSE))
4399 			print_lat_help_header(m);
4400 	} else {
4401 		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4402 			if (trace_flags & TRACE_ITER_IRQ_INFO)
4403 				print_func_help_header_irq(iter->array_buffer,
4404 							   m, trace_flags);
4405 			else
4406 				print_func_help_header(iter->array_buffer, m,
4407 						       trace_flags);
4408 		}
4409 	}
4410 }
4411 
4412 static void test_ftrace_alive(struct seq_file *m)
4413 {
4414 	if (!ftrace_is_dead())
4415 		return;
4416 	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4417 		    "#          MAY BE MISSING FUNCTION EVENTS\n");
4418 }
4419 
4420 #ifdef CONFIG_TRACER_MAX_TRACE
4421 static void show_snapshot_main_help(struct seq_file *m)
4422 {
4423 	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4424 		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4425 		    "#                      Takes a snapshot of the main buffer.\n"
4426 		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4427 		    "#                      (Doesn't have to be '2' works with any number that\n"
4428 		    "#                       is not a '0' or '1')\n");
4429 }
4430 
4431 static void show_snapshot_percpu_help(struct seq_file *m)
4432 {
4433 	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4434 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4435 	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4436 		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
4437 #else
4438 	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4439 		    "#                     Must use main snapshot file to allocate.\n");
4440 #endif
4441 	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4442 		    "#                      (Doesn't have to be '2' works with any number that\n"
4443 		    "#                       is not a '0' or '1')\n");
4444 }
4445 
4446 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4447 {
4448 	if (iter->tr->allocated_snapshot)
4449 		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4450 	else
4451 		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4452 
4453 	seq_puts(m, "# Snapshot commands:\n");
4454 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4455 		show_snapshot_main_help(m);
4456 	else
4457 		show_snapshot_percpu_help(m);
4458 }
4459 #else
4460 /* Should never be called */
4461 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4462 #endif
4463 
4464 static int s_show(struct seq_file *m, void *v)
4465 {
4466 	struct trace_iterator *iter = v;
4467 	int ret;
4468 
4469 	if (iter->ent == NULL) {
4470 		if (iter->tr) {
4471 			seq_printf(m, "# tracer: %s\n", iter->trace->name);
4472 			seq_puts(m, "#\n");
4473 			test_ftrace_alive(m);
4474 		}
4475 		if (iter->snapshot && trace_empty(iter))
4476 			print_snapshot_help(m, iter);
4477 		else if (iter->trace && iter->trace->print_header)
4478 			iter->trace->print_header(m);
4479 		else
4480 			trace_default_header(m);
4481 
4482 	} else if (iter->leftover) {
4483 		/*
4484 		 * If we filled the seq_file buffer earlier, we
4485 		 * want to just show it now.
4486 		 */
4487 		ret = trace_print_seq(m, &iter->seq);
4488 
4489 		/* ret should this time be zero, but you never know */
4490 		iter->leftover = ret;
4491 
4492 	} else {
4493 		ret = print_trace_line(iter);
4494 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
4495 			iter->seq.full = 0;
4496 			trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
4497 		}
4498 		ret = trace_print_seq(m, &iter->seq);
4499 		/*
4500 		 * If we overflow the seq_file buffer, then it will
4501 		 * ask us for this data again at start up.
4502 		 * Use that instead.
4503 		 *  ret is 0 if seq_file write succeeded.
4504 		 *        -1 otherwise.
4505 		 */
4506 		iter->leftover = ret;
4507 	}
4508 
4509 	return 0;
4510 }
4511 
4512 /*
4513  * Should be used after trace_array_get(); trace_types_lock
4514  * ensures that i_cdev was already initialized.
4515  */
4516 static inline int tracing_get_cpu(struct inode *inode)
4517 {
4518 	if (inode->i_cdev) /* See trace_create_cpu_file() */
4519 		return (long)inode->i_cdev - 1;
4520 	return RING_BUFFER_ALL_CPUS;
4521 }
4522 
4523 static const struct seq_operations tracer_seq_ops = {
4524 	.start		= s_start,
4525 	.next		= s_next,
4526 	.stop		= s_stop,
4527 	.show		= s_show,
4528 };
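
/*
 * The seq_file core drives the operations above roughly like this for
 * each read() of the "trace" file (a simplified sketch of what
 * fs/seq_file.c does, not a verbatim copy):
 *
 *	p = s_start(m, &pos);
 *	while (p && the output still fits in the seq_file buffer) {
 *		s_show(m, p);
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);
 *
 * This is why s_start() must be able to resume from an arbitrary *pos,
 * and why iter->leftover is used to replay a line that overflowed the
 * seq_file buffer on a previous cycle.
 */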
4529 
4530 /*
4531  * Note, as iter itself can be allocated and freed in different
4532  * ways, this function is only used to free its content, and not
4533  * the iterator itself. The only requirement for all the allocations
4534  * is that they must zero all fields (kzalloc), as freeing works with
4535  * either allocated content or NULL.
4536  */
4537 static void free_trace_iter_content(struct trace_iterator *iter)
4538 {
4539 	/* The fmt is either NULL, allocated or points to static_fmt_buf */
4540 	if (iter->fmt != static_fmt_buf)
4541 		kfree(iter->fmt);
4542 
4543 	kfree(iter->temp);
4544 	kfree(iter->buffer_iter);
4545 	mutex_destroy(&iter->mutex);
4546 	free_cpumask_var(iter->started);
4547 }
4548 
4549 static struct trace_iterator *
4550 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4551 {
4552 	struct trace_array *tr = inode->i_private;
4553 	struct trace_iterator *iter;
4554 	int cpu;
4555 
4556 	if (tracing_disabled)
4557 		return ERR_PTR(-ENODEV);
4558 
4559 	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4560 	if (!iter)
4561 		return ERR_PTR(-ENOMEM);
4562 
4563 	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4564 				    GFP_KERNEL);
4565 	if (!iter->buffer_iter)
4566 		goto release;
4567 
4568 	/*
4569 	 * trace_find_next_entry() may need to save off iter->ent.
4570 	 * It will place it into the iter->temp buffer. As most
4571 	 * events are less than 128 bytes, allocate a buffer of that size.
4572 	 * If one is greater, then trace_find_next_entry() will
4573 	 * allocate a new buffer to adjust for the bigger iter->ent.
4574 	 * It's not critical if it fails to get allocated here.
4575 	 */
4576 	iter->temp = kmalloc(128, GFP_KERNEL);
4577 	if (iter->temp)
4578 		iter->temp_size = 128;
4579 
4580 	/*
4581 	 * trace_event_printf() may need to modify the given format
4582 	 * string to replace %p with %px so that it shows the real address
4583 	 * instead of a hash value. However, that is only for event
4584 	 * tracing; other tracers may not need it. Defer the allocation
4585 	 * until it is needed.
4586 	 */
4587 	iter->fmt = NULL;
4588 	iter->fmt_size = 0;
4589 
4590 	mutex_lock(&trace_types_lock);
4591 	iter->trace = tr->current_trace;
4592 
4593 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4594 		goto fail;
4595 
4596 	iter->tr = tr;
4597 
4598 #ifdef CONFIG_TRACER_MAX_TRACE
4599 	/* Currently only the top directory has a snapshot */
4600 	if (tr->current_trace->print_max || snapshot)
4601 		iter->array_buffer = &tr->max_buffer;
4602 	else
4603 #endif
4604 		iter->array_buffer = &tr->array_buffer;
4605 	iter->snapshot = snapshot;
4606 	iter->pos = -1;
4607 	iter->cpu_file = tracing_get_cpu(inode);
4608 	mutex_init(&iter->mutex);
4609 
4610 	/* Notify the tracer early, before we stop tracing. */
4611 	if (iter->trace->open)
4612 		iter->trace->open(iter);
4613 
4614 	/* Annotate start of buffers if we had overruns */
4615 	if (ring_buffer_overruns(iter->array_buffer->buffer))
4616 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
4617 
4618 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
4619 	if (trace_clocks[tr->clock_id].in_ns)
4620 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4621 
4622 	/*
4623 	 * If pause-on-trace is enabled, then stop the trace while
4624 	 * dumping, unless this is the "snapshot" file
4625 	 */
4626 	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4627 		tracing_stop_tr(tr);
4628 
4629 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4630 		for_each_tracing_cpu(cpu) {
4631 			iter->buffer_iter[cpu] =
4632 				ring_buffer_read_prepare(iter->array_buffer->buffer,
4633 							 cpu, GFP_KERNEL);
4634 		}
4635 		ring_buffer_read_prepare_sync();
4636 		for_each_tracing_cpu(cpu) {
4637 			ring_buffer_read_start(iter->buffer_iter[cpu]);
4638 			tracing_iter_reset(iter, cpu);
4639 		}
4640 	} else {
4641 		cpu = iter->cpu_file;
4642 		iter->buffer_iter[cpu] =
4643 			ring_buffer_read_prepare(iter->array_buffer->buffer,
4644 						 cpu, GFP_KERNEL);
4645 		ring_buffer_read_prepare_sync();
4646 		ring_buffer_read_start(iter->buffer_iter[cpu]);
4647 		tracing_iter_reset(iter, cpu);
4648 	}
4649 
4650 	mutex_unlock(&trace_types_lock);
4651 
4652 	return iter;
4653 
4654  fail:
4655 	mutex_unlock(&trace_types_lock);
4656 	free_trace_iter_content(iter);
4657 release:
4658 	seq_release_private(inode, file);
4659 	return ERR_PTR(-ENOMEM);
4660 }
4661 
4662 int tracing_open_generic(struct inode *inode, struct file *filp)
4663 {
4664 	int ret;
4665 
4666 	ret = tracing_check_open_get_tr(NULL);
4667 	if (ret)
4668 		return ret;
4669 
4670 	filp->private_data = inode->i_private;
4671 	return 0;
4672 }
4673 
4674 bool tracing_is_disabled(void)
4675 {
4676 	return tracing_disabled;
4677 }
4678 
4679 /*
4680  * Open and update trace_array ref count.
4681  * Must have the current trace_array passed to it.
4682  */
4683 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4684 {
4685 	struct trace_array *tr = inode->i_private;
4686 	int ret;
4687 
4688 	ret = tracing_check_open_get_tr(tr);
4689 	if (ret)
4690 		return ret;
4691 
4692 	filp->private_data = inode->i_private;
4693 
4694 	return 0;
4695 }
4696 
4697 /*
4698  * The private pointer of the inode is the trace_event_file.
4699  * Update the tr ref count associated to it.
4700  */
4701 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4702 {
4703 	struct trace_event_file *file = inode->i_private;
4704 	int ret;
4705 
4706 	ret = tracing_check_open_get_tr(file->tr);
4707 	if (ret)
4708 		return ret;
4709 
4710 	mutex_lock(&event_mutex);
4711 
4712 	/* Fail if the file is marked for removal */
4713 	if (file->flags & EVENT_FILE_FL_FREED) {
4714 		trace_array_put(file->tr);
4715 		ret = -ENODEV;
4716 	} else {
4717 		event_file_get(file);
4718 	}
4719 
4720 	mutex_unlock(&event_mutex);
4721 	if (ret)
4722 		return ret;
4723 
4724 	filp->private_data = inode->i_private;
4725 
4726 	return 0;
4727 }
4728 
4729 int tracing_release_file_tr(struct inode *inode, struct file *filp)
4730 {
4731 	struct trace_event_file *file = inode->i_private;
4732 
4733 	trace_array_put(file->tr);
4734 	event_file_put(file);
4735 
4736 	return 0;
4737 }
4738 
4739 int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
4740 {
4741 	tracing_release_file_tr(inode, filp);
4742 	return single_release(inode, filp);
4743 }
4744 
4745 static int tracing_mark_open(struct inode *inode, struct file *filp)
4746 {
4747 	stream_open(inode, filp);
4748 	return tracing_open_generic_tr(inode, filp);
4749 }
4750 
4751 static int tracing_release(struct inode *inode, struct file *file)
4752 {
4753 	struct trace_array *tr = inode->i_private;
4754 	struct seq_file *m = file->private_data;
4755 	struct trace_iterator *iter;
4756 	int cpu;
4757 
4758 	if (!(file->f_mode & FMODE_READ)) {
4759 		trace_array_put(tr);
4760 		return 0;
4761 	}
4762 
4763 	/* Writes do not use seq_file */
4764 	iter = m->private;
4765 	mutex_lock(&trace_types_lock);
4766 
4767 	for_each_tracing_cpu(cpu) {
4768 		if (iter->buffer_iter[cpu])
4769 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
4770 	}
4771 
4772 	if (iter->trace && iter->trace->close)
4773 		iter->trace->close(iter);
4774 
4775 	if (!iter->snapshot && tr->stop_count)
4776 		/* reenable tracing if it was previously enabled */
4777 		tracing_start_tr(tr);
4778 
4779 	__trace_array_put(tr);
4780 
4781 	mutex_unlock(&trace_types_lock);
4782 
4783 	free_trace_iter_content(iter);
4784 	seq_release_private(inode, file);
4785 
4786 	return 0;
4787 }
4788 
4789 int tracing_release_generic_tr(struct inode *inode, struct file *file)
4790 {
4791 	struct trace_array *tr = inode->i_private;
4792 
4793 	trace_array_put(tr);
4794 	return 0;
4795 }
4796 
4797 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4798 {
4799 	struct trace_array *tr = inode->i_private;
4800 
4801 	trace_array_put(tr);
4802 
4803 	return single_release(inode, file);
4804 }
4805 
4806 static int tracing_open(struct inode *inode, struct file *file)
4807 {
4808 	struct trace_array *tr = inode->i_private;
4809 	struct trace_iterator *iter;
4810 	int ret;
4811 
4812 	ret = tracing_check_open_get_tr(tr);
4813 	if (ret)
4814 		return ret;
4815 
4816 	/* If this file was open for write, then erase contents */
4817 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4818 		int cpu = tracing_get_cpu(inode);
4819 		struct array_buffer *trace_buf = &tr->array_buffer;
4820 
4821 #ifdef CONFIG_TRACER_MAX_TRACE
4822 		if (tr->current_trace->print_max)
4823 			trace_buf = &tr->max_buffer;
4824 #endif
4825 
4826 		if (cpu == RING_BUFFER_ALL_CPUS)
4827 			tracing_reset_online_cpus(trace_buf);
4828 		else
4829 			tracing_reset_cpu(trace_buf, cpu);
4830 	}
4831 
4832 	if (file->f_mode & FMODE_READ) {
4833 		iter = __tracing_open(inode, file, false);
4834 		if (IS_ERR(iter))
4835 			ret = PTR_ERR(iter);
4836 		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4837 			iter->iter_flags |= TRACE_FILE_LAT_FMT;
4838 	}
4839 
4840 	if (ret < 0)
4841 		trace_array_put(tr);
4842 
4843 	return ret;
4844 }
4845 
4846 /*
4847  * Some tracers are not suitable for instance buffers.
4848  * A tracer is always available for the global array (toplevel)
4849  * or if it explicitly states that it is.
4850  */
4851 static bool
4852 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4853 {
4854 #ifdef CONFIG_TRACER_SNAPSHOT
4855 	/* arrays with mapped buffer range do not have snapshots */
4856 	if (tr->range_addr_start && t->use_max_tr)
4857 		return false;
4858 #endif
4859 	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4860 }
4861 
4862 /* Find the next tracer that this trace array may use */
4863 static struct tracer *
4864 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4865 {
4866 	while (t && !trace_ok_for_array(t, tr))
4867 		t = t->next;
4868 
4869 	return t;
4870 }
4871 
4872 static void *
4873 t_next(struct seq_file *m, void *v, loff_t *pos)
4874 {
4875 	struct trace_array *tr = m->private;
4876 	struct tracer *t = v;
4877 
4878 	(*pos)++;
4879 
4880 	if (t)
4881 		t = get_tracer_for_array(tr, t->next);
4882 
4883 	return t;
4884 }
4885 
4886 static void *t_start(struct seq_file *m, loff_t *pos)
4887 {
4888 	struct trace_array *tr = m->private;
4889 	struct tracer *t;
4890 	loff_t l = 0;
4891 
4892 	mutex_lock(&trace_types_lock);
4893 
4894 	t = get_tracer_for_array(tr, trace_types);
4895 	for (; t && l < *pos; t = t_next(m, t, &l))
4896 			;
4897 		;
4898 	return t;
4899 }
4900 
4901 static void t_stop(struct seq_file *m, void *p)
4902 {
4903 	mutex_unlock(&trace_types_lock);
4904 }
4905 
4906 static int t_show(struct seq_file *m, void *v)
4907 {
4908 	struct tracer *t = v;
4909 
4910 	if (!t)
4911 		return 0;
4912 
4913 	seq_puts(m, t->name);
4914 	if (t->next)
4915 		seq_putc(m, ' ');
4916 	else
4917 		seq_putc(m, '\n');
4918 
4919 	return 0;
4920 }
4921 
4922 static const struct seq_operations show_traces_seq_ops = {
4923 	.start		= t_start,
4924 	.next		= t_next,
4925 	.stop		= t_stop,
4926 	.show		= t_show,
4927 };
4928 
4929 static int show_traces_open(struct inode *inode, struct file *file)
4930 {
4931 	struct trace_array *tr = inode->i_private;
4932 	struct seq_file *m;
4933 	int ret;
4934 
4935 	ret = tracing_check_open_get_tr(tr);
4936 	if (ret)
4937 		return ret;
4938 
4939 	ret = seq_open(file, &show_traces_seq_ops);
4940 	if (ret) {
4941 		trace_array_put(tr);
4942 		return ret;
4943 	}
4944 
4945 	m = file->private_data;
4946 	m->private = tr;
4947 
4948 	return 0;
4949 }
4950 
4951 static int tracing_seq_release(struct inode *inode, struct file *file)
4952 {
4953 	struct trace_array *tr = inode->i_private;
4954 
4955 	trace_array_put(tr);
4956 	return seq_release(inode, file);
4957 }
4958 
4959 static ssize_t
4960 tracing_write_stub(struct file *filp, const char __user *ubuf,
4961 		   size_t count, loff_t *ppos)
4962 {
4963 	return count;
4964 }
4965 
4966 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4967 {
4968 	int ret;
4969 
4970 	if (file->f_mode & FMODE_READ)
4971 		ret = seq_lseek(file, offset, whence);
4972 	else
4973 		file->f_pos = ret = 0;
4974 
4975 	return ret;
4976 }
4977 
4978 static const struct file_operations tracing_fops = {
4979 	.open		= tracing_open,
4980 	.read		= seq_read,
4981 	.read_iter	= seq_read_iter,
4982 	.splice_read	= copy_splice_read,
4983 	.write		= tracing_write_stub,
4984 	.llseek		= tracing_lseek,
4985 	.release	= tracing_release,
4986 };
4987 
4988 static const struct file_operations show_traces_fops = {
4989 	.open		= show_traces_open,
4990 	.read		= seq_read,
4991 	.llseek		= seq_lseek,
4992 	.release	= tracing_seq_release,
4993 };
4994 
4995 static ssize_t
4996 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4997 		     size_t count, loff_t *ppos)
4998 {
4999 	struct trace_array *tr = file_inode(filp)->i_private;
5000 	char *mask_str;
5001 	int len;
5002 
5003 	len = snprintf(NULL, 0, "%*pb\n",
5004 		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
5005 	mask_str = kmalloc(len, GFP_KERNEL);
5006 	if (!mask_str)
5007 		return -ENOMEM;
5008 
5009 	len = snprintf(mask_str, len, "%*pb\n",
5010 		       cpumask_pr_args(tr->tracing_cpumask));
5011 	if (len >= count) {
5012 		count = -EINVAL;
5013 		goto out_err;
5014 	}
5015 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5016 
5017 out_err:
5018 	kfree(mask_str);
5019 
5020 	return count;
5021 }
5022 
5023 int tracing_set_cpumask(struct trace_array *tr,
5024 			cpumask_var_t tracing_cpumask_new)
5025 {
5026 	int cpu;
5027 
5028 	if (!tr)
5029 		return -EINVAL;
5030 
5031 	local_irq_disable();
5032 	arch_spin_lock(&tr->max_lock);
5033 	for_each_tracing_cpu(cpu) {
5034 		/*
5035 		 * Increase/decrease the disabled counter if we are
5036 		 * about to flip a bit in the cpumask:
5037 		 */
5038 		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5039 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5040 			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5041 			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5042 #ifdef CONFIG_TRACER_MAX_TRACE
5043 			ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5044 #endif
5045 		}
5046 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5047 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5048 			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5049 			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5050 #ifdef CONFIG_TRACER_MAX_TRACE
5051 			ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5052 #endif
5053 		}
5054 	}
5055 	arch_spin_unlock(&tr->max_lock);
5056 	local_irq_enable();
5057 
5058 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5059 
5060 	return 0;
5061 }
5062 
5063 static ssize_t
5064 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5065 		      size_t count, loff_t *ppos)
5066 {
5067 	struct trace_array *tr = file_inode(filp)->i_private;
5068 	cpumask_var_t tracing_cpumask_new;
5069 	int err;
5070 
5071 	if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5072 		return -ENOMEM;
5073 
5074 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5075 	if (err)
5076 		goto err_free;
5077 
5078 	err = tracing_set_cpumask(tr, tracing_cpumask_new);
5079 	if (err)
5080 		goto err_free;
5081 
5082 	free_cpumask_var(tracing_cpumask_new);
5083 
5084 	return count;
5085 
5086 err_free:
5087 	free_cpumask_var(tracing_cpumask_new);
5088 
5089 	return err;
5090 }
5091 
5092 static const struct file_operations tracing_cpumask_fops = {
5093 	.open		= tracing_open_generic_tr,
5094 	.read		= tracing_cpumask_read,
5095 	.write		= tracing_cpumask_write,
5096 	.release	= tracing_release_generic_tr,
5097 	.llseek		= generic_file_llseek,
5098 };
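
/*
 * Example usage (shell, illustrative): the mask is parsed by
 * cpumask_parse_user(), so it is written as a hex cpumask. To limit
 * tracing to CPUs 0 and 1:
 *
 *	# echo 3 > /sys/kernel/tracing/tracing_cpumask
 *
 * Reading the file back reports the current mask in the same format.
 */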
5099 
5100 static int tracing_trace_options_show(struct seq_file *m, void *v)
5101 {
5102 	struct tracer_opt *trace_opts;
5103 	struct trace_array *tr = m->private;
5104 	u32 tracer_flags;
5105 	int i;
5106 
5107 	guard(mutex)(&trace_types_lock);
5108 
5109 	tracer_flags = tr->current_trace->flags->val;
5110 	trace_opts = tr->current_trace->flags->opts;
5111 
5112 	for (i = 0; trace_options[i]; i++) {
5113 		if (tr->trace_flags & (1 << i))
5114 			seq_printf(m, "%s\n", trace_options[i]);
5115 		else
5116 			seq_printf(m, "no%s\n", trace_options[i]);
5117 	}
5118 
5119 	for (i = 0; trace_opts[i].name; i++) {
5120 		if (tracer_flags & trace_opts[i].bit)
5121 			seq_printf(m, "%s\n", trace_opts[i].name);
5122 		else
5123 			seq_printf(m, "no%s\n", trace_opts[i].name);
5124 	}
5125 
5126 	return 0;
5127 }
5128 
5129 static int __set_tracer_option(struct trace_array *tr,
5130 			       struct tracer_flags *tracer_flags,
5131 			       struct tracer_opt *opts, int neg)
5132 {
5133 	struct tracer *trace = tracer_flags->trace;
5134 	int ret;
5135 
5136 	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5137 	if (ret)
5138 		return ret;
5139 
5140 	if (neg)
5141 		tracer_flags->val &= ~opts->bit;
5142 	else
5143 		tracer_flags->val |= opts->bit;
5144 	return 0;
5145 }
5146 
5147 /* Try to assign a tracer specific option */
5148 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5149 {
5150 	struct tracer *trace = tr->current_trace;
5151 	struct tracer_flags *tracer_flags = trace->flags;
5152 	struct tracer_opt *opts = NULL;
5153 	int i;
5154 
5155 	for (i = 0; tracer_flags->opts[i].name; i++) {
5156 		opts = &tracer_flags->opts[i];
5157 
5158 		if (strcmp(cmp, opts->name) == 0)
5159 			return __set_tracer_option(tr, trace->flags, opts, neg);
5160 	}
5161 
5162 	return -EINVAL;
5163 }
5164 
5165 /* Some tracers require overwrite to stay enabled */
5166 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5167 {
5168 	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5169 		return -1;
5170 
5171 	return 0;
5172 }
5173 
5174 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5175 {
5176 	if ((mask == TRACE_ITER_RECORD_TGID) ||
5177 	    (mask == TRACE_ITER_RECORD_CMD) ||
5178 	    (mask == TRACE_ITER_TRACE_PRINTK))
5179 		lockdep_assert_held(&event_mutex);
5180 
5181 	/* do nothing if flag is already set */
5182 	if (!!(tr->trace_flags & mask) == !!enabled)
5183 		return 0;
5184 
5185 	/* Give the tracer a chance to approve the change */
5186 	if (tr->current_trace->flag_changed)
5187 		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5188 			return -EINVAL;
5189 
5190 	if (mask == TRACE_ITER_TRACE_PRINTK) {
5191 		if (enabled) {
5192 			update_printk_trace(tr);
5193 		} else {
5194 			/*
5195 			 * The global_trace cannot clear this.
5196 			 * Its flag only gets cleared if another instance sets it.
5197 			 */
5198 			if (printk_trace == &global_trace)
5199 				return -EINVAL;
5200 			/*
5201 			 * An instance must always have it set;
5202 			 * by default, that's the global_trace instance.
5203 			 */
5204 			if (printk_trace == tr)
5205 				update_printk_trace(&global_trace);
5206 		}
5207 	}
5208 
5209 	if (enabled)
5210 		tr->trace_flags |= mask;
5211 	else
5212 		tr->trace_flags &= ~mask;
5213 
5214 	if (mask == TRACE_ITER_RECORD_CMD)
5215 		trace_event_enable_cmd_record(enabled);
5216 
5217 	if (mask == TRACE_ITER_RECORD_TGID) {
5218 
5219 		if (trace_alloc_tgid_map() < 0) {
5220 			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5221 			return -ENOMEM;
5222 		}
5223 
5224 		trace_event_enable_tgid_record(enabled);
5225 	}
5226 
5227 	if (mask == TRACE_ITER_EVENT_FORK)
5228 		trace_event_follow_fork(tr, enabled);
5229 
5230 	if (mask == TRACE_ITER_FUNC_FORK)
5231 		ftrace_pid_follow_fork(tr, enabled);
5232 
5233 	if (mask == TRACE_ITER_OVERWRITE) {
5234 		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5235 #ifdef CONFIG_TRACER_MAX_TRACE
5236 		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5237 #endif
5238 	}
5239 
5240 	if (mask == TRACE_ITER_PRINTK) {
5241 		trace_printk_start_stop_comm(enabled);
5242 		trace_printk_control(enabled);
5243 	}
5244 
5245 	return 0;
5246 }
5247 
5248 int trace_set_options(struct trace_array *tr, char *option)
5249 {
5250 	char *cmp;
5251 	int neg = 0;
5252 	int ret;
5253 	size_t orig_len = strlen(option);
5254 	int len;
5255 
5256 	cmp = strstrip(option);
5257 
5258 	len = str_has_prefix(cmp, "no");
5259 	if (len)
5260 		neg = 1;
5261 
5262 	cmp += len;
5263 
5264 	mutex_lock(&event_mutex);
5265 	mutex_lock(&trace_types_lock);
5266 
5267 	ret = match_string(trace_options, -1, cmp);
5268 	/* If no global option matched, try the tracer-specific options */
5269 	if (ret < 0)
5270 		ret = set_tracer_option(tr, cmp, neg);
5271 	else
5272 		ret = set_tracer_flag(tr, 1 << ret, !neg);
5273 
5274 	mutex_unlock(&trace_types_lock);
5275 	mutex_unlock(&event_mutex);
5276 
5277 	/*
5278 	 * If the first trailing whitespace is replaced with '\0' by strstrip,
5279 	 * turn it back into a space.
5280 	 */
5281 	if (orig_len > strlen(option))
5282 		option[strlen(option)] = ' ';
5283 
5284 	return ret;
5285 }
5286 
5287 static void __init apply_trace_boot_options(void)
5288 {
5289 	char *buf = trace_boot_options_buf;
5290 	char *option;
5291 
5292 	while (true) {
5293 		option = strsep(&buf, ",");
5294 
5295 		if (!option)
5296 			break;
5297 
5298 		if (*option)
5299 			trace_set_options(&global_trace, option);
5300 
5301 		/* Put back the comma to allow this to be called again */
5302 		if (buf)
5303 			*(buf - 1) = ',';
5304 	}
5305 }
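
/*
 * Example (illustrative): booting with
 *
 *	trace_options=sym-offset,noirq-info
 *
 * makes the loop above call trace_set_options() once for "sym-offset"
 * and once for "noirq-info". The commas are put back afterwards so the
 * buffer can be parsed again on a later call.
 */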
5306 
5307 static ssize_t
5308 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5309 			size_t cnt, loff_t *ppos)
5310 {
5311 	struct seq_file *m = filp->private_data;
5312 	struct trace_array *tr = m->private;
5313 	char buf[64];
5314 	int ret;
5315 
5316 	if (cnt >= sizeof(buf))
5317 		return -EINVAL;
5318 
5319 	if (copy_from_user(buf, ubuf, cnt))
5320 		return -EFAULT;
5321 
5322 	buf[cnt] = 0;
5323 
5324 	ret = trace_set_options(tr, buf);
5325 	if (ret < 0)
5326 		return ret;
5327 
5328 	*ppos += cnt;
5329 
5330 	return cnt;
5331 }
5332 
5333 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5334 {
5335 	struct trace_array *tr = inode->i_private;
5336 	int ret;
5337 
5338 	ret = tracing_check_open_get_tr(tr);
5339 	if (ret)
5340 		return ret;
5341 
5342 	ret = single_open(file, tracing_trace_options_show, inode->i_private);
5343 	if (ret < 0)
5344 		trace_array_put(tr);
5345 
5346 	return ret;
5347 }
5348 
5349 static const struct file_operations tracing_iter_fops = {
5350 	.open		= tracing_trace_options_open,
5351 	.read		= seq_read,
5352 	.llseek		= seq_lseek,
5353 	.release	= tracing_single_release_tr,
5354 	.write		= tracing_trace_options_write,
5355 };
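
/*
 * Example usage (shell, illustrative):
 *
 *	# cat trace_options		(clear flags are listed with a "no" prefix)
 *	# echo sym-offset > trace_options
 *	# echo nosym-offset > trace_options
 *
 * A name that does not match a global option falls through to the
 * current tracer's private options via set_tracer_option() above.
 */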
5356 
5357 static const char readme_msg[] =
5358 	"tracing mini-HOWTO:\n\n"
5359 	"By default tracefs removes all OTH file permission bits.\n"
5360 	"When mounting tracefs an optional group id can be specified\n"
5361 	"which adds the group to every directory and file in tracefs:\n\n"
5362 	"\t e.g. mount -t tracefs [-o [gid=<gid>]] nodev /sys/kernel/tracing\n\n"
5363 	"# echo 0 > tracing_on : quick way to disable tracing\n"
5364 	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5365 	" Important files:\n"
5366 	"  trace\t\t\t- The static contents of the buffer\n"
5367 	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
5368 	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5369 	"  current_tracer\t- function and latency tracers\n"
5370 	"  available_tracers\t- list of configured tracers for current_tracer\n"
5371 	"  error_log\t- error log for failed commands (that support it)\n"
5372 	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
5373 	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
5374 	"  trace_clock\t\t- change the clock used to order events\n"
5375 	"       local:   Per cpu clock but may not be synced across CPUs\n"
5376 	"      global:   Synced across CPUs but slows tracing down.\n"
5377 	"     counter:   Not a clock, but just an increment\n"
5378 	"      uptime:   Jiffy counter from time of boot\n"
5379 	"        perf:   Same clock that perf events use\n"
5380 #ifdef CONFIG_X86_64
5381 	"     x86-tsc:   TSC cycle counter\n"
5382 #endif
5383 	"\n  timestamp_mode\t- view the mode used to timestamp events\n"
5384 	"       delta:   Delta difference against a buffer-wide timestamp\n"
5385 	"    absolute:   Absolute (standalone) timestamp\n"
5386 	"\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5387 	"\n  trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5388 	"  tracing_cpumask\t- Limit which CPUs to trace\n"
5389 	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5390 	"\t\t\t  Remove sub-buffer with rmdir\n"
5391 	"  trace_options\t\t- Set format or modify how tracing happens\n"
5392 	"\t\t\t  Disable an option by prefixing 'no' to the\n"
5393 	"\t\t\t  option name\n"
5394 	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5395 #ifdef CONFIG_DYNAMIC_FTRACE
5396 	"\n  available_filter_functions - list of functions that can be filtered on\n"
5397 	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
5398 	"\t\t\t  functions\n"
5399 	"\t     accepts: func_full_name or glob-matching-pattern\n"
5400 	"\t     modules: Can select a group via module\n"
5401 	"\t      Format: :mod:<module-name>\n"
5402 	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
5403 	"\t    triggers: a command to perform when function is hit\n"
5404 	"\t      Format: <function>:<trigger>[:count]\n"
5405 	"\t     trigger: traceon, traceoff\n"
5406 	"\t\t      enable_event:<system>:<event>\n"
5407 	"\t\t      disable_event:<system>:<event>\n"
5408 #ifdef CONFIG_STACKTRACE
5409 	"\t\t      stacktrace\n"
5410 #endif
5411 #ifdef CONFIG_TRACER_SNAPSHOT
5412 	"\t\t      snapshot\n"
5413 #endif
5414 	"\t\t      dump\n"
5415 	"\t\t      cpudump\n"
5416 	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
5417 	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
5418 	"\t     The first one will disable tracing every time do_fault is hit\n"
5419 	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
5420 	"\t       The first time do trap is hit and it disables tracing, the\n"
5421 	"\t       counter will decrement to 2. If tracing is already disabled,\n"
5422 	"\t       the counter will not decrement. It only decrements when the\n"
5423 	"\t       trigger did work\n"
5424 	"\t     To remove trigger without count:\n"
5425 	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
5426 	"\t     To remove trigger with a count:\n"
5427 	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5428 	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
5429 	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5430 	"\t    modules: Can select a group via module command :mod:\n"
5431 	"\t    Does not accept triggers\n"
5432 #endif /* CONFIG_DYNAMIC_FTRACE */
5433 #ifdef CONFIG_FUNCTION_TRACER
5434 	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5435 	"\t\t    (function)\n"
5436 	"  set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5437 	"\t\t    (function)\n"
5438 #endif
5439 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5440 	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5441 	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5442 	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5443 #endif
5444 #ifdef CONFIG_TRACER_SNAPSHOT
5445 	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
5446 	"\t\t\t  snapshot buffer. Read the contents for more\n"
5447 	"\t\t\t  information\n"
5448 #endif
5449 #ifdef CONFIG_STACK_TRACER
5450 	"  stack_trace\t\t- Shows the max stack trace when active\n"
5451 	"  stack_max_size\t- Shows current max stack size that was traced\n"
5452 	"\t\t\t  Write into this file to reset the max size (trigger a\n"
5453 	"\t\t\t  new trace)\n"
5454 #ifdef CONFIG_DYNAMIC_FTRACE
5455 	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5456 	"\t\t\t  traces\n"
5457 #endif
5458 #endif /* CONFIG_STACK_TRACER */
5459 #ifdef CONFIG_DYNAMIC_EVENTS
5460 	"  dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5461 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5462 #endif
5463 #ifdef CONFIG_KPROBE_EVENTS
5464 	"  kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5465 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5466 #endif
5467 #ifdef CONFIG_UPROBE_EVENTS
5468 	"  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5469 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5470 #endif
5471 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
5472     defined(CONFIG_FPROBE_EVENTS)
5473 	"\t  accepts: event-definitions (one definition per line)\n"
5474 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5475 	"\t   Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5476 	"\t           r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5477 #endif
5478 #ifdef CONFIG_FPROBE_EVENTS
5479 	"\t           f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5480 	"\t           t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
5481 #endif
5482 #ifdef CONFIG_HIST_TRIGGERS
5483 	"\t           s:[synthetic/]<event> <field> [<field>]\n"
5484 #endif
5485 	"\t           e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5486 	"\t           -:[<group>/][<event>]\n"
5487 #ifdef CONFIG_KPROBE_EVENTS
5488 	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5489   "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5490 #endif
5491 #ifdef CONFIG_UPROBE_EVENTS
5492   "   place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5493 #endif
5494 	"\t     args: <name>=fetcharg[:type]\n"
5495 	"\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5496 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5497 	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5498 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
5499 	"\t           <argname>[->field[->field|.field...]],\n"
5500 #endif
5501 #else
5502 	"\t           $stack<index>, $stack, $retval, $comm,\n"
5503 #endif
5504 	"\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5505 	"\t     kernel return probes support: $retval, $arg<N>, $comm\n"
5506 	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5507 	"\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5508 	"\t           symstr, %pd/%pD, <type>\\[<array-size>\\]\n"
5509 #ifdef CONFIG_HIST_TRIGGERS
5510 	"\t    field: <stype> <name>;\n"
5511 	"\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5512 	"\t           [unsigned] char/int/long\n"
5513 #endif
5514 	"\t    efield: For event probes ('e' types), the field is on of the fields\n"
5515 	"\t            of the <attached-group>/<attached-event>.\n"
5516 #endif
5517 	"  events/\t\t- Directory containing all trace event subsystems:\n"
5518 	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5519 	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
5520 	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5521 	"\t\t\t  events\n"
5522 	"      filter\t\t- If set, only events passing filter are traced\n"
5523 	"  events/<system>/<event>/\t- Directory containing control files for\n"
5524 	"\t\t\t  <event>:\n"
5525 	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5526 	"      filter\t\t- If set, only events passing filter are traced\n"
5527 	"      trigger\t\t- If set, a command to perform when event is hit\n"
5528 	"\t    Format: <trigger>[:count][if <filter>]\n"
5529 	"\t   trigger: traceon, traceoff\n"
5530 	"\t            enable_event:<system>:<event>\n"
5531 	"\t            disable_event:<system>:<event>\n"
5532 #ifdef CONFIG_HIST_TRIGGERS
5533 	"\t            enable_hist:<system>:<event>\n"
5534 	"\t            disable_hist:<system>:<event>\n"
5535 #endif
5536 #ifdef CONFIG_STACKTRACE
5537 	"\t\t    stacktrace\n"
5538 #endif
5539 #ifdef CONFIG_TRACER_SNAPSHOT
5540 	"\t\t    snapshot\n"
5541 #endif
5542 #ifdef CONFIG_HIST_TRIGGERS
5543 	"\t\t    hist (see below)\n"
5544 #endif
5545 	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
5546 	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
5547 	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5548 	"\t                  events/block/block_unplug/trigger\n"
5549 	"\t   The first disables tracing every time block_unplug is hit.\n"
5550 	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
5551 	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
5552 	"\t     is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
5553 	"\t   Like function triggers, the counter is only decremented if it\n"
5554 	"\t    enabled or disabled tracing.\n"
5555 	"\t   To remove a trigger without a count:\n"
5556 	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
5557 	"\t   To remove a trigger with a count:\n"
5558 	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
5559 	"\t   Filters can be ignored when removing a trigger.\n"
5560 #ifdef CONFIG_HIST_TRIGGERS
5561 	"      hist trigger\t- If set, event hits are aggregated into a hash table\n"
5562 	"\t    Format: hist:keys=<field1[,field2,...]>\n"
5563 	"\t            [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5564 	"\t            [:values=<field1[,field2,...]>]\n"
5565 	"\t            [:sort=<field1[,field2,...]>]\n"
5566 	"\t            [:size=#entries]\n"
5567 	"\t            [:pause][:continue][:clear]\n"
5568 	"\t            [:name=histname1]\n"
5569 	"\t            [:nohitcount]\n"
5570 	"\t            [:<handler>.<action>]\n"
5571 	"\t            [if <filter>]\n\n"
5572 	"\t    Note, special fields can be used as well:\n"
5573 	"\t            common_timestamp - to record current timestamp\n"
5574 	"\t            common_cpu - to record the CPU the event happened on\n"
5575 	"\n"
5576 	"\t    A hist trigger variable can be:\n"
5577 	"\t        - a reference to a field e.g. x=current_timestamp,\n"
5578 	"\t        - a reference to another variable e.g. y=$x,\n"
5579 	"\t        - a numeric literal: e.g. ms_per_sec=1000,\n"
5580 	"\t        - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5581 	"\n"
5582 	"\t    hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5583 	"\t    multiplication(*) and division(/) operators. An operand can be either a\n"
5584 	"\t    variable reference, field or numeric literal.\n"
5585 	"\n"
5586 	"\t    When a matching event is hit, an entry is added to a hash\n"
5587 	"\t    table using the key(s) and value(s) named, and the value of a\n"
5588 	"\t    sum called 'hitcount' is incremented.  Keys and values\n"
5589 	"\t    correspond to fields in the event's format description.  Keys\n"
5590 	"\t    can be any field, or the special string 'common_stacktrace'.\n"
5591 	"\t    Compound keys consisting of up to two fields can be specified\n"
5592 	"\t    by the 'keys' keyword.  Values must correspond to numeric\n"
5593 	"\t    fields.  Sort keys consisting of up to two fields can be\n"
5594 	"\t    specified using the 'sort' keyword.  The sort direction can\n"
5595 	"\t    be modified by appending '.descending' or '.ascending' to a\n"
5596 	"\t    sort field.  The 'size' parameter can be used to specify more\n"
5597 	"\t    or fewer than the default 2048 entries for the hashtable size.\n"
5598 	"\t    If a hist trigger is given a name using the 'name' parameter,\n"
5599 	"\t    its histogram data will be shared with other triggers of the\n"
5600 	"\t    same name, and trigger hits will update this common data.\n\n"
5601 	"\t    Reading the 'hist' file for the event will dump the hash\n"
5602 	"\t    table in its entirety to stdout.  If there are multiple hist\n"
5603 	"\t    triggers attached to an event, there will be a table for each\n"
5604 	"\t    trigger in the output.  The table displayed for a named\n"
5605 	"\t    trigger will be the same as any other instance having the\n"
5606 	"\t    same name.  The default format used to display a given field\n"
5607 	"\t    can be modified by appending any of the following modifiers\n"
5608 	"\t    to the field name, as applicable:\n\n"
5609 	"\t            .hex        display a number as a hex value\n"
5610 	"\t            .sym        display an address as a symbol\n"
5611 	"\t            .sym-offset display an address as a symbol and offset\n"
5612 	"\t            .execname   display a common_pid as a program name\n"
5613 	"\t            .syscall    display a syscall id as a syscall name\n"
5614 	"\t            .log2       display log2 value rather than raw number\n"
5615 	"\t            .buckets=size  display values in groups of size rather than raw number\n"
5616 	"\t            .usecs      display a common_timestamp in microseconds\n"
5617 	"\t            .percent    display a number as a percentage value\n"
5618 	"\t            .graph      display a bar-graph of a value\n\n"
5619 	"\t    The 'pause' parameter can be used to pause an existing hist\n"
5620 	"\t    trigger or to start a hist trigger but not log any events\n"
5621 	"\t    until told to do so.  'continue' can be used to start or\n"
5622 	"\t    restart a paused hist trigger.\n\n"
5623 	"\t    The 'clear' parameter will clear the contents of a running\n"
5624 	"\t    hist trigger and leave its current paused/active state\n"
5625 	"\t    unchanged.\n\n"
5626 	"\t    The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5627 	"\t    raw hitcount in the histogram.\n\n"
5628 	"\t    The enable_hist and disable_hist triggers can be used to\n"
5629 	"\t    have one event conditionally start and stop another event's\n"
5630 	"\t    already-attached hist trigger.  The syntax is analogous to\n"
5631 	"\t    the enable_event and disable_event triggers.\n\n"
5632 	"\t    Hist trigger handlers and actions are executed whenever a\n"
5633 	"\t    histogram entry is added or updated.  They take the form:\n\n"
5634 	"\t        <handler>.<action>\n\n"
5635 	"\t    The available handlers are:\n\n"
5636 	"\t        onmatch(matching.event)  - invoke on addition or update\n"
5637 	"\t        onmax(var)               - invoke if var exceeds current max\n"
5638 	"\t        onchange(var)            - invoke action if var changes\n\n"
5639 	"\t    The available actions are:\n\n"
5640 	"\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"
5641 	"\t        save(field,...)                      - save current event fields\n"
5642 #ifdef CONFIG_TRACER_SNAPSHOT
5643 	"\t        snapshot()                           - snapshot the trace buffer\n\n"
5644 #endif
5645 #ifdef CONFIG_SYNTH_EVENTS
5646 	"  events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5647 	"\t  Write into this file to define/undefine new synthetic events.\n"
5648 	"\t     example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5649 #endif
5650 #endif
5651 ;
5652 
5653 static ssize_t
5654 tracing_readme_read(struct file *filp, char __user *ubuf,
5655 		       size_t cnt, loff_t *ppos)
5656 {
5657 	return simple_read_from_buffer(ubuf, cnt, ppos,
5658 					readme_msg, strlen(readme_msg));
5659 }
5660 
5661 static const struct file_operations tracing_readme_fops = {
5662 	.open		= tracing_open_generic,
5663 	.read		= tracing_readme_read,
5664 	.llseek		= generic_file_llseek,
5665 };
5666 
5667 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5668 static union trace_eval_map_item *
5669 update_eval_map(union trace_eval_map_item *ptr)
5670 {
5671 	if (!ptr->map.eval_string) {
5672 		if (ptr->tail.next) {
5673 			ptr = ptr->tail.next;
5674 			/* Set ptr to the next real item (skip head) */
5675 			ptr++;
5676 		} else
5677 			return NULL;
5678 	}
5679 	return ptr;
5680 }
5681 
5682 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5683 {
5684 	union trace_eval_map_item *ptr = v;
5685 
5686 	/*
5687 	 * Paranoid! If ptr points to end, we don't want to increment past it.
5688 	 * This really should never happen.
5689 	 */
5690 	(*pos)++;
5691 	ptr = update_eval_map(ptr);
5692 	if (WARN_ON_ONCE(!ptr))
5693 		return NULL;
5694 
5695 	ptr++;
5696 	ptr = update_eval_map(ptr);
5697 
5698 	return ptr;
5699 }
5700 
5701 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5702 {
5703 	union trace_eval_map_item *v;
5704 	loff_t l = 0;
5705 
5706 	mutex_lock(&trace_eval_mutex);
5707 
5708 	v = trace_eval_maps;
5709 	if (v)
5710 		v++;
5711 
5712 	while (v && l < *pos) {
5713 		v = eval_map_next(m, v, &l);
5714 	}
5715 
5716 	return v;
5717 }
5718 
5719 static void eval_map_stop(struct seq_file *m, void *v)
5720 {
5721 	mutex_unlock(&trace_eval_mutex);
5722 }
5723 
5724 static int eval_map_show(struct seq_file *m, void *v)
5725 {
5726 	union trace_eval_map_item *ptr = v;
5727 
5728 	seq_printf(m, "%s %ld (%s)\n",
5729 		   ptr->map.eval_string, ptr->map.eval_value,
5730 		   ptr->map.system);
5731 
5732 	return 0;
5733 }
5734 
5735 static const struct seq_operations tracing_eval_map_seq_ops = {
5736 	.start		= eval_map_start,
5737 	.next		= eval_map_next,
5738 	.stop		= eval_map_stop,
5739 	.show		= eval_map_show,
5740 };
5741 
5742 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5743 {
5744 	int ret;
5745 
5746 	ret = tracing_check_open_get_tr(NULL);
5747 	if (ret)
5748 		return ret;
5749 
5750 	return seq_open(filp, &tracing_eval_map_seq_ops);
5751 }
5752 
5753 static const struct file_operations tracing_eval_map_fops = {
5754 	.open		= tracing_eval_map_open,
5755 	.read		= seq_read,
5756 	.llseek		= seq_lseek,
5757 	.release	= seq_release,
5758 };
5759 
5760 static inline union trace_eval_map_item *
5761 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5762 {
5763 	/* Return tail of array given the head */
5764 	return ptr + ptr->head.length + 1;
5765 }
5766 
5767 static void
5768 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5769 			   int len)
5770 {
5771 	struct trace_eval_map **stop;
5772 	struct trace_eval_map **map;
5773 	union trace_eval_map_item *map_array;
5774 	union trace_eval_map_item *ptr;
5775 
5776 	stop = start + len;
5777 
5778 	/*
5779 	 * The trace_eval_maps contains the map plus a head and tail item,
5780 	 * where the head holds the module and length of array, and the
5781 	 * tail holds a pointer to the next list.
5782 	 */
5783 	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5784 	if (!map_array) {
5785 		pr_warn("Unable to allocate trace eval mapping\n");
5786 		return;
5787 	}
5788 
5789 	guard(mutex)(&trace_eval_mutex);
5790 
5791 	if (!trace_eval_maps)
5792 		trace_eval_maps = map_array;
5793 	else {
5794 		ptr = trace_eval_maps;
5795 		for (;;) {
5796 			ptr = trace_eval_jmp_to_tail(ptr);
5797 			if (!ptr->tail.next)
5798 				break;
5799 			ptr = ptr->tail.next;
5800 
5801 		}
5802 		ptr->tail.next = map_array;
5803 	}
5804 	map_array->head.mod = mod;
5805 	map_array->head.length = len;
5806 	map_array++;
5807 
5808 	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5809 		map_array->map = **map;
5810 		map_array++;
5811 	}
5812 	memset(map_array, 0, sizeof(*map_array));
5813 }
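
/*
 * Illustrative sketch (comment only, not compiled): for len == 2, the
 * chunk built above is laid out as
 *
 *	[ head | map 0 | map 1 | tail (zeroed) ]
 *
 * head.mod/head.length describe the module and the map count, the
 * copied maps follow, and the zeroed tail item terminates the chunk
 * until a later module chains its own array through tail.next.
 */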
5814 
5815 static void trace_create_eval_file(struct dentry *d_tracer)
5816 {
5817 	trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
5818 			  NULL, &tracing_eval_map_fops);
5819 }
5820 
5821 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5822 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5823 static inline void trace_insert_eval_map_file(struct module *mod,
5824 			      struct trace_eval_map **start, int len) { }
5825 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5826 
5827 static void trace_insert_eval_map(struct module *mod,
5828 				  struct trace_eval_map **start, int len)
5829 {
5830 	struct trace_eval_map **map;
5831 
5832 	if (len <= 0)
5833 		return;
5834 
5835 	map = start;
5836 
5837 	trace_event_eval_update(map, len);
5838 
5839 	trace_insert_eval_map_file(mod, start, len);
5840 }
5841 
5842 static ssize_t
5843 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5844 		       size_t cnt, loff_t *ppos)
5845 {
5846 	struct trace_array *tr = filp->private_data;
5847 	char buf[MAX_TRACER_SIZE+2];
5848 	int r;
5849 
5850 	mutex_lock(&trace_types_lock);
5851 	r = sprintf(buf, "%s\n", tr->current_trace->name);
5852 	mutex_unlock(&trace_types_lock);
5853 
5854 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5855 }
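
/*
 * A usage sketch (assuming the usual tracefs mount point): this read
 * handler backs the "current_tracer" file, so from user space
 *
 *	# cat /sys/kernel/tracing/current_tracer
 *	nop
 *
 * simply prints the name of tr->current_trace.
 */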
5856 
5857 int tracer_init(struct tracer *t, struct trace_array *tr)
5858 {
5859 	tracing_reset_online_cpus(&tr->array_buffer);
5860 	return t->init(tr);
5861 }
5862 
5863 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
5864 {
5865 	int cpu;
5866 
5867 	for_each_tracing_cpu(cpu)
5868 		per_cpu_ptr(buf->data, cpu)->entries = val;
5869 }
5870 
5871 static void update_buffer_entries(struct array_buffer *buf, int cpu)
5872 {
5873 	if (cpu == RING_BUFFER_ALL_CPUS) {
5874 		set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
5875 	} else {
5876 		per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
5877 	}
5878 }
5879 
5880 #ifdef CONFIG_TRACER_MAX_TRACE
5881 /* resize @trace_buf's buffer to the size of @size_buf's entries */
5882 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5883 					struct array_buffer *size_buf, int cpu_id)
5884 {
5885 	int cpu, ret = 0;
5886 
5887 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
5888 		for_each_tracing_cpu(cpu) {
5889 			ret = ring_buffer_resize(trace_buf->buffer,
5890 				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5891 			if (ret < 0)
5892 				break;
5893 			per_cpu_ptr(trace_buf->data, cpu)->entries =
5894 				per_cpu_ptr(size_buf->data, cpu)->entries;
5895 		}
5896 	} else {
5897 		ret = ring_buffer_resize(trace_buf->buffer,
5898 				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5899 		if (ret == 0)
5900 			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5901 				per_cpu_ptr(size_buf->data, cpu_id)->entries;
5902 	}
5903 
5904 	return ret;
5905 }
5906 #endif /* CONFIG_TRACER_MAX_TRACE */
5907 
5908 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5909 					unsigned long size, int cpu)
5910 {
5911 	int ret;
5912 
5913 	/*
5914 	 * If kernel or user changes the size of the ring buffer
5915 	 * we use the size that was given, and we can forget about
5916 	 * expanding it later.
5917 	 */
5918 	trace_set_ring_buffer_expanded(tr);
5919 
5920 	/* May be called before buffers are initialized */
5921 	if (!tr->array_buffer.buffer)
5922 		return 0;
5923 
5924 	/* Do not allow tracing while resizing ring buffer */
5925 	tracing_stop_tr(tr);
5926 
5927 	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5928 	if (ret < 0)
5929 		goto out_start;
5930 
5931 #ifdef CONFIG_TRACER_MAX_TRACE
5932 	if (!tr->allocated_snapshot)
5933 		goto out;
5934 
5935 	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5936 	if (ret < 0) {
5937 		int r = resize_buffer_duplicate_size(&tr->array_buffer,
5938 						     &tr->array_buffer, cpu);
5939 		if (r < 0) {
5940 			/*
5941 			 * AARGH! We are left with different
5942 			 * size max buffer!!!!
5943 			 * The max buffer is our "snapshot" buffer.
5944 			 * When a tracer needs a snapshot (one of the
5945 			 * latency tracers), it swaps the max buffer
5946 			 * with the saved snapshot. We managed to
5947 			 * update the size of the main buffer, but failed to
5948 			 * update the size of the max buffer. But when we tried
5949 			 * to reset the main buffer to the original size, we
5950 			 * failed there too. This is very unlikely to
5951 			 * happen, but if it does, warn and kill all
5952 			 * tracing.
5953 			 */
5954 			WARN_ON(1);
5955 			tracing_disabled = 1;
5956 		}
5957 		goto out_start;
5958 	}
5959 
5960 	update_buffer_entries(&tr->max_buffer, cpu);
5961 
5962  out:
5963 #endif /* CONFIG_TRACER_MAX_TRACE */
5964 
5965 	update_buffer_entries(&tr->array_buffer, cpu);
5966  out_start:
5967 	tracing_start_tr(tr);
5968 	return ret;
5969 }
5970 
5971 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5972 				  unsigned long size, int cpu_id)
5973 {
5974 	int ret;
5975 
5976 	guard(mutex)(&trace_types_lock);
5977 
5978 	if (cpu_id != RING_BUFFER_ALL_CPUS) {
5979 		/* make sure this cpu is enabled in the mask */
5980 		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask))
5981 			return -EINVAL;
5982 	}
5983 
5984 	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5985 	if (ret < 0)
5986 		ret = -ENOMEM;
5987 
5988 	return ret;
5989 }
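
/*
 * A minimal call sketch: note that @size here is in bytes, unlike the
 * buffer_size_kb file, which converts from kilobytes before calling in.
 * E.g. growing every per-CPU buffer of @tr to 1MB would be
 *
 *	ret = tracing_resize_ring_buffer(tr, 1 << 20, RING_BUFFER_ALL_CPUS);
 */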
5990 
5991 static void update_last_data(struct trace_array *tr)
5992 {
5993 	if (!tr->text_delta && !tr->data_delta)
5994 		return;
5995 
5996 	/*
5997 	 * Need to clear all CPU buffers as there cannot be events
5998 	 * from the previous boot mixed with events with this boot
5999 	 * as that will cause a confusing trace. Need to clear all
6000 	 * CPU buffers, even for those that may currently be offline.
6001 	 */
6002 	tracing_reset_all_cpus(&tr->array_buffer);
6003 
6004 	/* Using current data now */
6005 	tr->text_delta = 0;
6006 	tr->data_delta = 0;
6007 }
6008 
6009 /**
6010  * tracing_update_buffers - used by tracing facility to expand ring buffers
6011  * @tr: The tracing instance
6012  *
6013  * To save memory when tracing is never used on a system that has it
6014  * configured in, the ring buffers are set to a minimum size. But once
6015  * a user starts to use the tracing facility, they need to grow
6016  * to their default size.
6017  *
6018  * This function is to be called when a tracer is about to be used.
6019  */
6020 int tracing_update_buffers(struct trace_array *tr)
6021 {
6022 	int ret = 0;
6023 
6024 	mutex_lock(&trace_types_lock);
6025 
6026 	update_last_data(tr);
6027 
6028 	if (!tr->ring_buffer_expanded)
6029 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6030 						RING_BUFFER_ALL_CPUS);
6031 	mutex_unlock(&trace_types_lock);
6032 
6033 	return ret;
6034 }
6035 
6036 struct trace_option_dentry;
6037 
6038 static void
6039 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6040 
6041 /*
6042  * Used to clear out the tracer before deletion of an instance.
6043  * Must have trace_types_lock held.
6044  */
6045 static void tracing_set_nop(struct trace_array *tr)
6046 {
6047 	if (tr->current_trace == &nop_trace)
6048 		return;
6049 
6050 	tr->current_trace->enabled--;
6051 
6052 	if (tr->current_trace->reset)
6053 		tr->current_trace->reset(tr);
6054 
6055 	tr->current_trace = &nop_trace;
6056 }
6057 
6058 static bool tracer_options_updated;
6059 
6060 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6061 {
6062 	/* Only enable if the directory has been created already. */
6063 	if (!tr->dir)
6064 		return;
6065 
6066 	/* Only create trace option files after update_tracer_options finish */
6067 	if (!tracer_options_updated)
6068 		return;
6069 
6070 	create_trace_option_files(tr, t);
6071 }
6072 
6073 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6074 {
6075 	struct tracer *t;
6076 #ifdef CONFIG_TRACER_MAX_TRACE
6077 	bool had_max_tr;
6078 #endif
6079 	int ret;
6080 
6081 	guard(mutex)(&trace_types_lock);
6082 
6083 	update_last_data(tr);
6084 
6085 	if (!tr->ring_buffer_expanded) {
6086 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6087 						RING_BUFFER_ALL_CPUS);
6088 		if (ret < 0)
6089 			return ret;
6090 		ret = 0;
6091 	}
6092 
6093 	for (t = trace_types; t; t = t->next) {
6094 		if (strcmp(t->name, buf) == 0)
6095 			break;
6096 	}
6097 	if (!t)
6098 		return -EINVAL;
6099 
6100 	if (t == tr->current_trace)
6101 		return 0;
6102 
6103 #ifdef CONFIG_TRACER_SNAPSHOT
6104 	if (t->use_max_tr) {
6105 		local_irq_disable();
6106 		arch_spin_lock(&tr->max_lock);
6107 		ret = tr->cond_snapshot ? -EBUSY : 0;
6108 		arch_spin_unlock(&tr->max_lock);
6109 		local_irq_enable();
6110 		if (ret)
6111 			return ret;
6112 	}
6113 #endif
6114 	/* Some tracers won't work on kernel command line */
6115 	if (system_state < SYSTEM_RUNNING && t->noboot) {
6116 		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6117 			t->name);
6118 		return -EINVAL;
6119 	}
6120 
6121 	/* Some tracers are only allowed for the top level buffer */
6122 	if (!trace_ok_for_array(t, tr))
6123 		return -EINVAL;
6124 
6125 	/* If trace pipe files are being read, we can't change the tracer */
6126 	if (tr->trace_ref)
6127 		return -EBUSY;
6128 
6129 	trace_branch_disable();
6130 
6131 	tr->current_trace->enabled--;
6132 
6133 	if (tr->current_trace->reset)
6134 		tr->current_trace->reset(tr);
6135 
6136 #ifdef CONFIG_TRACER_MAX_TRACE
6137 	had_max_tr = tr->current_trace->use_max_tr;
6138 
6139 	/* Current trace needs to be nop_trace before synchronize_rcu */
6140 	tr->current_trace = &nop_trace;
6141 
6142 	if (had_max_tr && !t->use_max_tr) {
6143 		/*
6144 		 * We need to make sure that the update_max_tr sees that
6145 		 * current_trace changed to nop_trace to keep it from
6146 		 * swapping the buffers after we resize it.
6147 		 * update_max_tr() is called with interrupts disabled,
6148 		 * so a synchronize_rcu() is sufficient.
6149 		 */
6150 		synchronize_rcu();
6151 		free_snapshot(tr);
6152 		tracing_disarm_snapshot(tr);
6153 	}
6154 
6155 	if (!had_max_tr && t->use_max_tr) {
6156 		ret = tracing_arm_snapshot_locked(tr);
6157 		if (ret)
6158 			return ret;
6159 	}
6160 #else
6161 	tr->current_trace = &nop_trace;
6162 #endif
6163 
6164 	if (t->init) {
6165 		ret = tracer_init(t, tr);
6166 		if (ret) {
6167 #ifdef CONFIG_TRACER_MAX_TRACE
6168 			if (t->use_max_tr)
6169 				tracing_disarm_snapshot(tr);
6170 #endif
6171 			return ret;
6172 		}
6173 	}
6174 
6175 	tr->current_trace = t;
6176 	tr->current_trace->enabled++;
6177 	trace_branch_enable(tr);
6178 
6179 	return 0;
6180 }
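
/*
 * A usage sketch (assuming the function tracer is configured in): this
 * path is normally reached from user space via the "current_tracer"
 * file, handled by tracing_set_trace_write() below:
 *
 *	# echo function > /sys/kernel/tracing/current_tracer
 */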
6181 
6182 static ssize_t
6183 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6184 			size_t cnt, loff_t *ppos)
6185 {
6186 	struct trace_array *tr = filp->private_data;
6187 	char buf[MAX_TRACER_SIZE+1];
6188 	char *name;
6189 	size_t ret;
6190 	int err;
6191 
6192 	ret = cnt;
6193 
6194 	if (cnt > MAX_TRACER_SIZE)
6195 		cnt = MAX_TRACER_SIZE;
6196 
6197 	if (copy_from_user(buf, ubuf, cnt))
6198 		return -EFAULT;
6199 
6200 	buf[cnt] = 0;
6201 
6202 	name = strim(buf);
6203 
6204 	err = tracing_set_tracer(tr, name);
6205 	if (err)
6206 		return err;
6207 
6208 	*ppos += ret;
6209 
6210 	return ret;
6211 }
6212 
6213 static ssize_t
6214 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6215 		   size_t cnt, loff_t *ppos)
6216 {
6217 	char buf[64];
6218 	int r;
6219 
6220 	r = snprintf(buf, sizeof(buf), "%ld\n",
6221 		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6222 	if (r > sizeof(buf))
6223 		r = sizeof(buf);
6224 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6225 }
6226 
6227 static ssize_t
6228 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6229 		    size_t cnt, loff_t *ppos)
6230 {
6231 	unsigned long val;
6232 	int ret;
6233 
6234 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6235 	if (ret)
6236 		return ret;
6237 
6238 	*ptr = val * 1000;
6239 
6240 	return cnt;
6241 }
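
/*
 * Note on units: the files backed by these helpers (e.g. tracing_thresh)
 * are read and written in microseconds; the "* 1000" above stores the
 * value in nanoseconds, and tracing_nsecs_read() converts it back. So
 *
 *	# echo 100 > /sys/kernel/tracing/tracing_thresh
 *
 * stores 100000 ns.
 */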
6242 
6243 static ssize_t
6244 tracing_thresh_read(struct file *filp, char __user *ubuf,
6245 		    size_t cnt, loff_t *ppos)
6246 {
6247 	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6248 }
6249 
6250 static ssize_t
6251 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6252 		     size_t cnt, loff_t *ppos)
6253 {
6254 	struct trace_array *tr = filp->private_data;
6255 	int ret;
6256 
6257 	guard(mutex)(&trace_types_lock);
6258 	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6259 	if (ret < 0)
6260 		return ret;
6261 
6262 	if (tr->current_trace->update_thresh) {
6263 		ret = tr->current_trace->update_thresh(tr);
6264 		if (ret < 0)
6265 			return ret;
6266 	}
6267 
6268 	return cnt;
6269 }
6270 
6271 #ifdef CONFIG_TRACER_MAX_TRACE
6272 
6273 static ssize_t
6274 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6275 		     size_t cnt, loff_t *ppos)
6276 {
6277 	struct trace_array *tr = filp->private_data;
6278 
6279 	return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6280 }
6281 
6282 static ssize_t
6283 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6284 		      size_t cnt, loff_t *ppos)
6285 {
6286 	struct trace_array *tr = filp->private_data;
6287 
6288 	return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6289 }
6290 
6291 #endif
6292 
6293 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6294 {
6295 	if (cpu == RING_BUFFER_ALL_CPUS) {
6296 		if (cpumask_empty(tr->pipe_cpumask)) {
6297 			cpumask_setall(tr->pipe_cpumask);
6298 			return 0;
6299 		}
6300 	} else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6301 		cpumask_set_cpu(cpu, tr->pipe_cpumask);
6302 		return 0;
6303 	}
6304 	return -EBUSY;
6305 }
6306 
6307 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6308 {
6309 	if (cpu == RING_BUFFER_ALL_CPUS) {
6310 		WARN_ON(!cpumask_full(tr->pipe_cpumask));
6311 		cpumask_clear(tr->pipe_cpumask);
6312 	} else {
6313 		WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6314 		cpumask_clear_cpu(cpu, tr->pipe_cpumask);
6315 	}
6316 }
6317 
6318 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6319 {
6320 	struct trace_array *tr = inode->i_private;
6321 	struct trace_iterator *iter;
6322 	int cpu;
6323 	int ret;
6324 
6325 	ret = tracing_check_open_get_tr(tr);
6326 	if (ret)
6327 		return ret;
6328 
6329 	mutex_lock(&trace_types_lock);
6330 	cpu = tracing_get_cpu(inode);
6331 	ret = open_pipe_on_cpu(tr, cpu);
6332 	if (ret)
6333 		goto fail_pipe_on_cpu;
6334 
6335 	/* create a buffer to store the information to pass to userspace */
6336 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6337 	if (!iter) {
6338 		ret = -ENOMEM;
6339 		goto fail_alloc_iter;
6340 	}
6341 
6342 	trace_seq_init(&iter->seq);
6343 	iter->trace = tr->current_trace;
6344 
6345 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6346 		ret = -ENOMEM;
6347 		goto fail;
6348 	}
6349 
6350 	/* trace pipe does not show start of buffer */
6351 	cpumask_setall(iter->started);
6352 
6353 	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6354 		iter->iter_flags |= TRACE_FILE_LAT_FMT;
6355 
6356 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
6357 	if (trace_clocks[tr->clock_id].in_ns)
6358 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6359 
6360 	iter->tr = tr;
6361 	iter->array_buffer = &tr->array_buffer;
6362 	iter->cpu_file = cpu;
6363 	mutex_init(&iter->mutex);
6364 	filp->private_data = iter;
6365 
6366 	if (iter->trace->pipe_open)
6367 		iter->trace->pipe_open(iter);
6368 
6369 	nonseekable_open(inode, filp);
6370 
6371 	tr->trace_ref++;
6372 
6373 	mutex_unlock(&trace_types_lock);
6374 	return ret;
6375 
6376 fail:
6377 	kfree(iter);
6378 fail_alloc_iter:
6379 	close_pipe_on_cpu(tr, cpu);
6380 fail_pipe_on_cpu:
6381 	__trace_array_put(tr);
6382 	mutex_unlock(&trace_types_lock);
6383 	return ret;
6384 }
6385 
6386 static int tracing_release_pipe(struct inode *inode, struct file *file)
6387 {
6388 	struct trace_iterator *iter = file->private_data;
6389 	struct trace_array *tr = inode->i_private;
6390 
6391 	mutex_lock(&trace_types_lock);
6392 
6393 	tr->trace_ref--;
6394 
6395 	if (iter->trace->pipe_close)
6396 		iter->trace->pipe_close(iter);
6397 	close_pipe_on_cpu(tr, iter->cpu_file);
6398 	mutex_unlock(&trace_types_lock);
6399 
6400 	free_trace_iter_content(iter);
6401 	kfree(iter);
6402 
6403 	trace_array_put(tr);
6404 
6405 	return 0;
6406 }
6407 
6408 static __poll_t
6409 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6410 {
6411 	struct trace_array *tr = iter->tr;
6412 
6413 	/* Iterators are static, they should be filled or empty */
6414 	if (trace_buffer_iter(iter, iter->cpu_file))
6415 		return EPOLLIN | EPOLLRDNORM;
6416 
6417 	if (tr->trace_flags & TRACE_ITER_BLOCK)
6418 		/*
6419 		 * Always select as readable when in blocking mode
6420 		 */
6421 		return EPOLLIN | EPOLLRDNORM;
6422 	else
6423 		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6424 					     filp, poll_table, iter->tr->buffer_percent);
6425 }
6426 
6427 static __poll_t
6428 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6429 {
6430 	struct trace_iterator *iter = filp->private_data;
6431 
6432 	return trace_poll(iter, filp, poll_table);
6433 }
6434 
6435 /* Must be called with iter->mutex held. */
6436 static int tracing_wait_pipe(struct file *filp)
6437 {
6438 	struct trace_iterator *iter = filp->private_data;
6439 	int ret;
6440 
6441 	while (trace_empty(iter)) {
6442 
6443 		if ((filp->f_flags & O_NONBLOCK)) {
6444 			return -EAGAIN;
6445 		}
6446 
6447 		/*
6448 		 * We give an EOF only once we have read something while
6449 		 * tracing is disabled. We still block if tracing is disabled
6450 		 * but we have never read anything. This allows a user to cat
6451 		 * this file, and then enable tracing. But after we have read
6452 		 * something, we give an EOF when tracing is again disabled.
6453 		 *
6454 		 * iter->pos will be 0 if we haven't read anything.
6455 		 */
6456 		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6457 			break;
6458 
6459 		mutex_unlock(&iter->mutex);
6460 
6461 		ret = wait_on_pipe(iter, 0);
6462 
6463 		mutex_lock(&iter->mutex);
6464 
6465 		if (ret)
6466 			return ret;
6467 	}
6468 
6469 	return 1;
6470 }
6471 
6472 /*
6473  * Consumer reader.
6474  */
6475 static ssize_t
6476 tracing_read_pipe(struct file *filp, char __user *ubuf,
6477 		  size_t cnt, loff_t *ppos)
6478 {
6479 	struct trace_iterator *iter = filp->private_data;
6480 	ssize_t sret;
6481 
6482 	/*
6483 	 * Avoid more than one consumer on a single file descriptor
6484 	 * This is just a matter of traces coherency, the ring buffer itself
6485 	 * is protected.
6486 	 */
6487 	guard(mutex)(&iter->mutex);
6488 
6489 	/* return any leftover data */
6490 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6491 	if (sret != -EBUSY)
6492 		return sret;
6493 
6494 	trace_seq_init(&iter->seq);
6495 
6496 	if (iter->trace->read) {
6497 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6498 		if (sret)
6499 			return sret;
6500 	}
6501 
6502 waitagain:
6503 	sret = tracing_wait_pipe(filp);
6504 	if (sret <= 0)
6505 		return sret;
6506 
6507 	/* stop when tracing is finished */
6508 	if (trace_empty(iter))
6509 		return 0;
6510 
6511 	if (cnt >= TRACE_SEQ_BUFFER_SIZE)
6512 		cnt = TRACE_SEQ_BUFFER_SIZE - 1;
6513 
6514 	/* reset all but tr, trace, and overruns */
6515 	trace_iterator_reset(iter);
6516 	cpumask_clear(iter->started);
6517 	trace_seq_init(&iter->seq);
6518 
6519 	trace_event_read_lock();
6520 	trace_access_lock(iter->cpu_file);
6521 	while (trace_find_next_entry_inc(iter) != NULL) {
6522 		enum print_line_t ret;
6523 		int save_len = iter->seq.seq.len;
6524 
6525 		ret = print_trace_line(iter);
6526 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6527 			/*
6528 			 * If one print_trace_line() fills the entire trace_seq in one shot,
6529 			 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6530 			 * In this case, we need to consume it, otherwise the loop will peek
6531 			 * this event again next time, resulting in an infinite loop.
6532 			 */
6533 			if (save_len == 0) {
6534 				iter->seq.full = 0;
6535 				trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6536 				trace_consume(iter);
6537 				break;
6538 			}
6539 
6540 			/* In other cases, don't print partial lines */
6541 			iter->seq.seq.len = save_len;
6542 			break;
6543 		}
6544 		if (ret != TRACE_TYPE_NO_CONSUME)
6545 			trace_consume(iter);
6546 
6547 		if (trace_seq_used(&iter->seq) >= cnt)
6548 			break;
6549 
6550 		/*
6551 		 * Setting the full flag means we reached the trace_seq buffer
6552 		 * size and should have left via the partial output condition above.
6553 		 * If we get here, one of the trace_seq_* functions was not used properly.
6554 		 */
6555 		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6556 			  iter->ent->type);
6557 	}
6558 	trace_access_unlock(iter->cpu_file);
6559 	trace_event_read_unlock();
6560 
6561 	/* Now copy what we have to the user */
6562 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6563 	if (iter->seq.readpos >= trace_seq_used(&iter->seq))
6564 		trace_seq_init(&iter->seq);
6565 
6566 	/*
6567 	 * If there was nothing to send to user, in spite of consuming trace
6568 	 * entries, go back to wait for more entries.
6569 	 */
6570 	if (sret == -EBUSY)
6571 		goto waitagain;
6572 
6573 	return sret;
6574 }
6575 
6576 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6577 				     unsigned int idx)
6578 {
6579 	__free_page(spd->pages[idx]);
6580 }
6581 
6582 static size_t
6583 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6584 {
6585 	size_t count;
6586 	int save_len;
6587 	int ret;
6588 
6589 	/* Seq buffer is page-sized, exactly what we need. */
6590 	for (;;) {
6591 		save_len = iter->seq.seq.len;
6592 		ret = print_trace_line(iter);
6593 
6594 		if (trace_seq_has_overflowed(&iter->seq)) {
6595 			iter->seq.seq.len = save_len;
6596 			break;
6597 		}
6598 
6599 		/*
6600 		 * This should not be hit, because it should only
6601 		 * be set if the iter->seq overflowed. But check it
6602 		 * anyway to be safe.
6603 		 */
6604 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6605 			iter->seq.seq.len = save_len;
6606 			break;
6607 		}
6608 
6609 		count = trace_seq_used(&iter->seq) - save_len;
6610 		if (rem < count) {
6611 			rem = 0;
6612 			iter->seq.seq.len = save_len;
6613 			break;
6614 		}
6615 
6616 		if (ret != TRACE_TYPE_NO_CONSUME)
6617 			trace_consume(iter);
6618 		rem -= count;
6619 		if (!trace_find_next_entry_inc(iter))	{
6620 			rem = 0;
6621 			iter->ent = NULL;
6622 			break;
6623 		}
6624 	}
6625 
6626 	return rem;
6627 }
6628 
6629 static ssize_t tracing_splice_read_pipe(struct file *filp,
6630 					loff_t *ppos,
6631 					struct pipe_inode_info *pipe,
6632 					size_t len,
6633 					unsigned int flags)
6634 {
6635 	struct page *pages_def[PIPE_DEF_BUFFERS];
6636 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
6637 	struct trace_iterator *iter = filp->private_data;
6638 	struct splice_pipe_desc spd = {
6639 		.pages		= pages_def,
6640 		.partial	= partial_def,
6641 		.nr_pages	= 0, /* This gets updated below. */
6642 		.nr_pages_max	= PIPE_DEF_BUFFERS,
6643 		.ops		= &default_pipe_buf_ops,
6644 		.spd_release	= tracing_spd_release_pipe,
6645 	};
6646 	ssize_t ret;
6647 	size_t rem;
6648 	unsigned int i;
6649 
6650 	if (splice_grow_spd(pipe, &spd))
6651 		return -ENOMEM;
6652 
6653 	mutex_lock(&iter->mutex);
6654 
6655 	if (iter->trace->splice_read) {
6656 		ret = iter->trace->splice_read(iter, filp,
6657 					       ppos, pipe, len, flags);
6658 		if (ret)
6659 			goto out_err;
6660 	}
6661 
6662 	ret = tracing_wait_pipe(filp);
6663 	if (ret <= 0)
6664 		goto out_err;
6665 
6666 	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6667 		ret = -EFAULT;
6668 		goto out_err;
6669 	}
6670 
6671 	trace_event_read_lock();
6672 	trace_access_lock(iter->cpu_file);
6673 
6674 	/* Fill as many pages as possible. */
6675 	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6676 		spd.pages[i] = alloc_page(GFP_KERNEL);
6677 		if (!spd.pages[i])
6678 			break;
6679 
6680 		rem = tracing_fill_pipe_page(rem, iter);
6681 
6682 		/* Copy the data into the page, so we can start over. */
6683 		ret = trace_seq_to_buffer(&iter->seq,
6684 					  page_address(spd.pages[i]),
6685 					  trace_seq_used(&iter->seq));
6686 		if (ret < 0) {
6687 			__free_page(spd.pages[i]);
6688 			break;
6689 		}
6690 		spd.partial[i].offset = 0;
6691 		spd.partial[i].len = trace_seq_used(&iter->seq);
6692 
6693 		trace_seq_init(&iter->seq);
6694 	}
6695 
6696 	trace_access_unlock(iter->cpu_file);
6697 	trace_event_read_unlock();
6698 	mutex_unlock(&iter->mutex);
6699 
6700 	spd.nr_pages = i;
6701 
6702 	if (i)
6703 		ret = splice_to_pipe(pipe, &spd);
6704 	else
6705 		ret = 0;
6706 out:
6707 	splice_shrink_spd(&spd);
6708 	return ret;
6709 
6710 out_err:
6711 	mutex_unlock(&iter->mutex);
6712 	goto out;
6713 }
6714 
6715 static ssize_t
6716 tracing_entries_read(struct file *filp, char __user *ubuf,
6717 		     size_t cnt, loff_t *ppos)
6718 {
6719 	struct inode *inode = file_inode(filp);
6720 	struct trace_array *tr = inode->i_private;
6721 	int cpu = tracing_get_cpu(inode);
6722 	char buf[64];
6723 	int r = 0;
6724 	ssize_t ret;
6725 
6726 	mutex_lock(&trace_types_lock);
6727 
6728 	if (cpu == RING_BUFFER_ALL_CPUS) {
6729 		int cpu, buf_size_same;
6730 		unsigned long size;
6731 
6732 		size = 0;
6733 		buf_size_same = 1;
6734 		/* check if all cpu sizes are same */
6735 		for_each_tracing_cpu(cpu) {
6736 			/* fill in the size from first enabled cpu */
6737 			if (size == 0)
6738 				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6739 			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6740 				buf_size_same = 0;
6741 				break;
6742 			}
6743 		}
6744 
6745 		if (buf_size_same) {
6746 			if (!tr->ring_buffer_expanded)
6747 				r = sprintf(buf, "%lu (expanded: %lu)\n",
6748 					    size >> 10,
6749 					    trace_buf_size >> 10);
6750 			else
6751 				r = sprintf(buf, "%lu\n", size >> 10);
6752 		} else
6753 			r = sprintf(buf, "X\n");
6754 	} else
6755 		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6756 
6757 	mutex_unlock(&trace_types_lock);
6758 
6759 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6760 	return ret;
6761 }
6762 
6763 static ssize_t
6764 tracing_entries_write(struct file *filp, const char __user *ubuf,
6765 		      size_t cnt, loff_t *ppos)
6766 {
6767 	struct inode *inode = file_inode(filp);
6768 	struct trace_array *tr = inode->i_private;
6769 	unsigned long val;
6770 	int ret;
6771 
6772 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6773 	if (ret)
6774 		return ret;
6775 
6776 	/* must have at least 1 entry */
6777 	if (!val)
6778 		return -EINVAL;
6779 
6780 	/* value is in KB */
6781 	val <<= 10;
6782 	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6783 	if (ret < 0)
6784 		return ret;
6785 
6786 	*ppos += cnt;
6787 
6788 	return cnt;
6789 }
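
/*
 * A usage sketch: this write handler backs buffer_size_kb, where the
 * value is in KB per CPU. E.g. to size each per-CPU buffer at 4MB:
 *
 *	# echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *
 * The per_cpu/cpuN/buffer_size_kb files resize a single CPU instead.
 */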
6790 
6791 static ssize_t
6792 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6793 				size_t cnt, loff_t *ppos)
6794 {
6795 	struct trace_array *tr = filp->private_data;
6796 	char buf[64];
6797 	int r, cpu;
6798 	unsigned long size = 0, expanded_size = 0;
6799 
6800 	mutex_lock(&trace_types_lock);
6801 	for_each_tracing_cpu(cpu) {
6802 		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6803 		if (!tr->ring_buffer_expanded)
6804 			expanded_size += trace_buf_size >> 10;
6805 	}
6806 	if (tr->ring_buffer_expanded)
6807 		r = sprintf(buf, "%lu\n", size);
6808 	else
6809 		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6810 	mutex_unlock(&trace_types_lock);
6811 
6812 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6813 }
6814 
6815 static ssize_t
6816 tracing_last_boot_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
6817 {
6818 	struct trace_array *tr = filp->private_data;
6819 	struct seq_buf seq;
6820 	char buf[64];
6821 
6822 	seq_buf_init(&seq, buf, 64);
6823 
6824 	seq_buf_printf(&seq, "text delta:\t%ld\n", tr->text_delta);
6825 	seq_buf_printf(&seq, "data delta:\t%ld\n", tr->data_delta);
6826 
6827 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, seq_buf_used(&seq));
6828 }
6829 
6830 static int tracing_buffer_meta_open(struct inode *inode, struct file *filp)
6831 {
6832 	struct trace_array *tr = inode->i_private;
6833 	int cpu = tracing_get_cpu(inode);
6834 	int ret;
6835 
6836 	ret = tracing_check_open_get_tr(tr);
6837 	if (ret)
6838 		return ret;
6839 
6840 	ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu);
6841 	if (ret < 0)
6842 		__trace_array_put(tr);
6843 	return ret;
6844 }
6845 
6846 static ssize_t
6847 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6848 			  size_t cnt, loff_t *ppos)
6849 {
6850 	/*
6851 	 * There is no need to read what the user has written; this function
6852 	 * exists just to make sure that there is no error when "echo" is used.
6853 	 */
6854 
6855 	*ppos += cnt;
6856 
6857 	return cnt;
6858 }
6859 
6860 static int
6861 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6862 {
6863 	struct trace_array *tr = inode->i_private;
6864 
6865 	/* disable tracing ? */
6866 	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6867 		tracer_tracing_off(tr);
6868 	/* resize the ring buffer to 0 */
6869 	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6870 
6871 	trace_array_put(tr);
6872 
6873 	return 0;
6874 }
6875 
6876 #define TRACE_MARKER_MAX_SIZE		4096
6877 
6878 static ssize_t
6879 tracing_mark_write(struct file *filp, const char __user *ubuf,
6880 					size_t cnt, loff_t *fpos)
6881 {
6882 	struct trace_array *tr = filp->private_data;
6883 	struct ring_buffer_event *event;
6884 	enum event_trigger_type tt = ETT_NONE;
6885 	struct trace_buffer *buffer;
6886 	struct print_entry *entry;
6887 	int meta_size;
6888 	ssize_t written;
6889 	size_t size;
6890 	int len;
6891 
6892 /* Used in tracing_mark_raw_write() as well */
6893 #define FAULTED_STR "<faulted>"
6894 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6895 
6896 	if (tracing_disabled)
6897 		return -EINVAL;
6898 
6899 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6900 		return -EINVAL;
6901 
6902 	if ((ssize_t)cnt < 0)
6903 		return -EINVAL;
6904 
6905 	if (cnt > TRACE_MARKER_MAX_SIZE)
6906 		cnt = TRACE_MARKER_MAX_SIZE;
6907 
6908 	meta_size = sizeof(*entry) + 2;  /* add '\0' and possible '\n' */
6909  again:
6910 	size = cnt + meta_size;
6911 
6912 	/* If less than "<faulted>", then make sure we can still add that */
6913 	if (cnt < FAULTED_SIZE)
6914 		size += FAULTED_SIZE - cnt;
6915 
6916 	buffer = tr->array_buffer.buffer;
6917 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6918 					    tracing_gen_ctx());
6919 	if (unlikely(!event)) {
6920 		/*
6921 		 * If the size was greater than what was allowed, then
6922 		 * make it smaller and try again.
6923 		 */
6924 		if (size > ring_buffer_max_event_size(buffer)) {
6925 			/* With cnt < FAULTED_SIZE, size should never be bigger than max */
6926 			if (WARN_ON_ONCE(cnt < FAULTED_SIZE))
6927 				return -EBADF;
6928 			cnt = ring_buffer_max_event_size(buffer) - meta_size;
6929 			/* The above should only happen once */
6930 			if (WARN_ON_ONCE(cnt + meta_size == size))
6931 				return -EBADF;
6932 			goto again;
6933 		}
6934 
6935 		/* Ring buffer disabled, return as if not open for write */
6936 		return -EBADF;
6937 	}
6938 
6939 	entry = ring_buffer_event_data(event);
6940 	entry->ip = _THIS_IP_;
6941 
6942 	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6943 	if (len) {
6944 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6945 		cnt = FAULTED_SIZE;
6946 		written = -EFAULT;
6947 	} else
6948 		written = cnt;
6949 
6950 	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6951 		/* do not add \n before testing triggers, but add \0 */
6952 		entry->buf[cnt] = '\0';
6953 		tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
6954 	}
6955 
6956 	if (entry->buf[cnt - 1] != '\n') {
6957 		entry->buf[cnt] = '\n';
6958 		entry->buf[cnt + 1] = '\0';
6959 	} else
6960 		entry->buf[cnt] = '\0';
6961 
6962 	if (static_branch_unlikely(&trace_marker_exports_enabled))
6963 		ftrace_exports(event, TRACE_EXPORT_MARKER);
6964 	__buffer_unlock_commit(buffer, event);
6965 
6966 	if (tt)
6967 		event_triggers_post_call(tr->trace_marker_file, tt);
6968 
6969 	return written;
6970 }
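
/*
 * A user-space sketch of writing a marker (any write() works; the
 * handler above appends a '\n' if one is missing):
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *	write(fd, "hello world", 11);	// recorded as a TRACE_PRINT event
 */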
6971 
6972 static ssize_t
6973 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6974 					size_t cnt, loff_t *fpos)
6975 {
6976 	struct trace_array *tr = filp->private_data;
6977 	struct ring_buffer_event *event;
6978 	struct trace_buffer *buffer;
6979 	struct raw_data_entry *entry;
6980 	ssize_t written;
6981 	int size;
6982 	int len;
6983 
6984 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6985 
6986 	if (tracing_disabled)
6987 		return -EINVAL;
6988 
6989 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6990 		return -EINVAL;
6991 
6992 	/* The marker must at least have a tag id */
6993 	if (cnt < sizeof(unsigned int))
6994 		return -EINVAL;
6995 
6996 	size = sizeof(*entry) + cnt;
6997 	if (cnt < FAULT_SIZE_ID)
6998 		size += FAULT_SIZE_ID - cnt;
6999 
7000 	buffer = tr->array_buffer.buffer;
7001 
7002 	if (size > ring_buffer_max_event_size(buffer))
7003 		return -EINVAL;
7004 
7005 	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7006 					    tracing_gen_ctx());
7007 	if (!event)
7008 		/* Ring buffer disabled, return as if not open for write */
7009 		return -EBADF;
7010 
7011 	entry = ring_buffer_event_data(event);
7012 
7013 	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7014 	if (len) {
7015 		entry->id = -1;
7016 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7017 		written = -EFAULT;
7018 	} else
7019 		written = cnt;
7020 
7021 	__buffer_unlock_commit(buffer, event);
7022 
7023 	return written;
7024 }
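
/*
 * A user-space sketch for trace_marker_raw: the payload must start with
 * a binary tag id (see the sizeof(unsigned int) check above), followed
 * by raw data:
 *
 *	struct { unsigned int id; char data[8]; } rec = { 42, "payload" };
 *	write(fd, &rec, sizeof(rec));	// fd open on trace_marker_raw
 */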
7025 
7026 static int tracing_clock_show(struct seq_file *m, void *v)
7027 {
7028 	struct trace_array *tr = m->private;
7029 	int i;
7030 
7031 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7032 		seq_printf(m,
7033 			"%s%s%s%s", i ? " " : "",
7034 			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7035 			i == tr->clock_id ? "]" : "");
7036 	seq_putc(m, '\n');
7037 
7038 	return 0;
7039 }
7040 
7041 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7042 {
7043 	int i;
7044 
7045 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7046 		if (strcmp(trace_clocks[i].name, clockstr) == 0)
7047 			break;
7048 	}
7049 	if (i == ARRAY_SIZE(trace_clocks))
7050 		return -EINVAL;
7051 
7052 	mutex_lock(&trace_types_lock);
7053 
7054 	tr->clock_id = i;
7055 
7056 	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7057 
7058 	/*
7059 	 * New clock may not be consistent with the previous clock.
7060 	 * Reset the buffer so that it doesn't have incomparable timestamps.
7061 	 */
7062 	tracing_reset_online_cpus(&tr->array_buffer);
7063 
7064 #ifdef CONFIG_TRACER_MAX_TRACE
7065 	if (tr->max_buffer.buffer)
7066 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7067 	tracing_reset_online_cpus(&tr->max_buffer);
7068 #endif
7069 
7070 	mutex_unlock(&trace_types_lock);
7071 
7072 	return 0;
7073 }
7074 
7075 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7076 				   size_t cnt, loff_t *fpos)
7077 {
7078 	struct seq_file *m = filp->private_data;
7079 	struct trace_array *tr = m->private;
7080 	char buf[64];
7081 	const char *clockstr;
7082 	int ret;
7083 
7084 	if (cnt >= sizeof(buf))
7085 		return -EINVAL;
7086 
7087 	if (copy_from_user(buf, ubuf, cnt))
7088 		return -EFAULT;
7089 
7090 	buf[cnt] = 0;
7091 
7092 	clockstr = strstrip(buf);
7093 
7094 	ret = tracing_set_clock(tr, clockstr);
7095 	if (ret)
7096 		return ret;
7097 
7098 	*fpos += cnt;
7099 
7100 	return cnt;
7101 }
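
/*
 * A usage sketch: the trace_clock file lists the available clocks with
 * the current one in brackets; writing a name switches clocks and, as
 * noted in tracing_set_clock(), resets the buffers:
 *
 *	# echo mono > /sys/kernel/tracing/trace_clock
 */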
7102 
7103 static int tracing_clock_open(struct inode *inode, struct file *file)
7104 {
7105 	struct trace_array *tr = inode->i_private;
7106 	int ret;
7107 
7108 	ret = tracing_check_open_get_tr(tr);
7109 	if (ret)
7110 		return ret;
7111 
7112 	ret = single_open(file, tracing_clock_show, inode->i_private);
7113 	if (ret < 0)
7114 		trace_array_put(tr);
7115 
7116 	return ret;
7117 }
7118 
7119 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7120 {
7121 	struct trace_array *tr = m->private;
7122 
7123 	mutex_lock(&trace_types_lock);
7124 
7125 	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7126 		seq_puts(m, "delta [absolute]\n");
7127 	else
7128 		seq_puts(m, "[delta] absolute\n");
7129 
7130 	mutex_unlock(&trace_types_lock);
7131 
7132 	return 0;
7133 }
7134 
7135 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7136 {
7137 	struct trace_array *tr = inode->i_private;
7138 	int ret;
7139 
7140 	ret = tracing_check_open_get_tr(tr);
7141 	if (ret)
7142 		return ret;
7143 
7144 	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7145 	if (ret < 0)
7146 		trace_array_put(tr);
7147 
7148 	return ret;
7149 }
7150 
7151 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7152 {
7153 	if (rbe == this_cpu_read(trace_buffered_event))
7154 		return ring_buffer_time_stamp(buffer);
7155 
7156 	return ring_buffer_event_time_stamp(buffer, rbe);
7157 }
7158 
7159 /*
7160  * Set or disable using the per-CPU trace_buffered_event when possible.
7161  */
7162 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7163 {
7164 	guard(mutex)(&trace_types_lock);
7165 
7166 	if (set && tr->no_filter_buffering_ref++)
7167 		return 0;
7168 
7169 	if (!set) {
7170 		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref))
7171 			return -EINVAL;
7172 
7173 		--tr->no_filter_buffering_ref;
7174 	}
7175 
7176 	return 0;
7177 }
7178 
7179 struct ftrace_buffer_info {
7180 	struct trace_iterator	iter;
7181 	void			*spare;
7182 	unsigned int		spare_cpu;
7183 	unsigned int		spare_size;
7184 	unsigned int		read;
7185 };
7186 
7187 #ifdef CONFIG_TRACER_SNAPSHOT
7188 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7189 {
7190 	struct trace_array *tr = inode->i_private;
7191 	struct trace_iterator *iter;
7192 	struct seq_file *m;
7193 	int ret;
7194 
7195 	ret = tracing_check_open_get_tr(tr);
7196 	if (ret)
7197 		return ret;
7198 
7199 	if (file->f_mode & FMODE_READ) {
7200 		iter = __tracing_open(inode, file, true);
7201 		if (IS_ERR(iter))
7202 			ret = PTR_ERR(iter);
7203 	} else {
7204 		/* Writes still need the seq_file to hold the private data */
7205 		ret = -ENOMEM;
7206 		m = kzalloc(sizeof(*m), GFP_KERNEL);
7207 		if (!m)
7208 			goto out;
7209 		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7210 		if (!iter) {
7211 			kfree(m);
7212 			goto out;
7213 		}
7214 		ret = 0;
7215 
7216 		iter->tr = tr;
7217 		iter->array_buffer = &tr->max_buffer;
7218 		iter->cpu_file = tracing_get_cpu(inode);
7219 		m->private = iter;
7220 		file->private_data = m;
7221 	}
7222 out:
7223 	if (ret < 0)
7224 		trace_array_put(tr);
7225 
7226 	return ret;
7227 }
7228 
7229 static void tracing_swap_cpu_buffer(void *tr)
7230 {
7231 	update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7232 }
7233 
7234 static ssize_t
7235 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7236 		       loff_t *ppos)
7237 {
7238 	struct seq_file *m = filp->private_data;
7239 	struct trace_iterator *iter = m->private;
7240 	struct trace_array *tr = iter->tr;
7241 	unsigned long val;
7242 	int ret;
7243 
7244 	ret = tracing_update_buffers(tr);
7245 	if (ret < 0)
7246 		return ret;
7247 
7248 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7249 	if (ret)
7250 		return ret;
7251 
7252 	guard(mutex)(&trace_types_lock);
7253 
7254 	if (tr->current_trace->use_max_tr)
7255 		return -EBUSY;
7256 
7257 	local_irq_disable();
7258 	arch_spin_lock(&tr->max_lock);
7259 	if (tr->cond_snapshot)
7260 		ret = -EBUSY;
7261 	arch_spin_unlock(&tr->max_lock);
7262 	local_irq_enable();
7263 	if (ret)
7264 		return ret;
7265 
7266 	switch (val) {
7267 	case 0:
7268 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
7269 			return -EINVAL;
7270 		if (tr->allocated_snapshot)
7271 			free_snapshot(tr);
7272 		break;
7273 	case 1:
7274 /* Only allow per-cpu swap if the ring buffer supports it */
7275 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7276 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
7277 			return -EINVAL;
7278 #endif
7279 		if (tr->allocated_snapshot)
7280 			ret = resize_buffer_duplicate_size(&tr->max_buffer,
7281 					&tr->array_buffer, iter->cpu_file);
7282 
7283 		ret = tracing_arm_snapshot_locked(tr);
7284 		if (ret)
7285 			return ret;
7286 
7287 		/* Now, we're going to swap */
7288 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7289 			local_irq_disable();
7290 			update_max_tr(tr, current, smp_processor_id(), NULL);
7291 			local_irq_enable();
7292 		} else {
7293 			smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7294 						 (void *)tr, 1);
7295 		}
7296 		tracing_disarm_snapshot(tr);
7297 		break;
7298 	default:
7299 		if (tr->allocated_snapshot) {
7300 			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7301 				tracing_reset_online_cpus(&tr->max_buffer);
7302 			else
7303 				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7304 		}
7305 		break;
7306 	}
7307 
7308 	if (ret >= 0) {
7309 		*ppos += cnt;
7310 		ret = cnt;
7311 	}
7312 
7313 	return ret;
7314 }
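
/*
 * A usage sketch of the values handled above, via the snapshot file:
 *
 *	# echo 1 > snapshot	# allocate if needed and swap in a snapshot
 *	# echo 0 > snapshot	# free the snapshot buffer
 *	# echo 2 > snapshot	# any other value clears the snapshot contents
 */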
7315 
7316 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7317 {
7318 	struct seq_file *m = file->private_data;
7319 	int ret;
7320 
7321 	ret = tracing_release(inode, file);
7322 
7323 	if (file->f_mode & FMODE_READ)
7324 		return ret;
7325 
7326 	/* If write only, the seq_file is just a stub */
7327 	if (m)
7328 		kfree(m->private);
7329 	kfree(m);
7330 
7331 	return 0;
7332 }
7333 
7334 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7335 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7336 				    size_t count, loff_t *ppos);
7337 static int tracing_buffers_release(struct inode *inode, struct file *file);
7338 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7339 		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7340 
7341 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7342 {
7343 	struct ftrace_buffer_info *info;
7344 	int ret;
7345 
7346 	/* The following checks for tracefs lockdown */
7347 	ret = tracing_buffers_open(inode, filp);
7348 	if (ret < 0)
7349 		return ret;
7350 
7351 	info = filp->private_data;
7352 
7353 	if (info->iter.trace->use_max_tr) {
7354 		tracing_buffers_release(inode, filp);
7355 		return -EBUSY;
7356 	}
7357 
7358 	info->iter.snapshot = true;
7359 	info->iter.array_buffer = &info->iter.tr->max_buffer;
7360 
7361 	return ret;
7362 }
7363 
7364 #endif /* CONFIG_TRACER_SNAPSHOT */
7365 
7366 
7367 static const struct file_operations tracing_thresh_fops = {
7368 	.open		= tracing_open_generic,
7369 	.read		= tracing_thresh_read,
7370 	.write		= tracing_thresh_write,
7371 	.llseek		= generic_file_llseek,
7372 };
7373 
7374 #ifdef CONFIG_TRACER_MAX_TRACE
7375 static const struct file_operations tracing_max_lat_fops = {
7376 	.open		= tracing_open_generic_tr,
7377 	.read		= tracing_max_lat_read,
7378 	.write		= tracing_max_lat_write,
7379 	.llseek		= generic_file_llseek,
7380 	.release	= tracing_release_generic_tr,
7381 };
7382 #endif
7383 
7384 static const struct file_operations set_tracer_fops = {
7385 	.open		= tracing_open_generic_tr,
7386 	.read		= tracing_set_trace_read,
7387 	.write		= tracing_set_trace_write,
7388 	.llseek		= generic_file_llseek,
7389 	.release	= tracing_release_generic_tr,
7390 };
7391 
7392 static const struct file_operations tracing_pipe_fops = {
7393 	.open		= tracing_open_pipe,
7394 	.poll		= tracing_poll_pipe,
7395 	.read		= tracing_read_pipe,
7396 	.splice_read	= tracing_splice_read_pipe,
7397 	.release	= tracing_release_pipe,
7398 };
7399 
7400 static const struct file_operations tracing_entries_fops = {
7401 	.open		= tracing_open_generic_tr,
7402 	.read		= tracing_entries_read,
7403 	.write		= tracing_entries_write,
7404 	.llseek		= generic_file_llseek,
7405 	.release	= tracing_release_generic_tr,
7406 };
7407 
7408 static const struct file_operations tracing_buffer_meta_fops = {
7409 	.open		= tracing_buffer_meta_open,
7410 	.read		= seq_read,
7411 	.llseek		= seq_lseek,
7412 	.release	= tracing_seq_release,
7413 };
7414 
7415 static const struct file_operations tracing_total_entries_fops = {
7416 	.open		= tracing_open_generic_tr,
7417 	.read		= tracing_total_entries_read,
7418 	.llseek		= generic_file_llseek,
7419 	.release	= tracing_release_generic_tr,
7420 };
7421 
7422 static const struct file_operations tracing_free_buffer_fops = {
7423 	.open		= tracing_open_generic_tr,
7424 	.write		= tracing_free_buffer_write,
7425 	.release	= tracing_free_buffer_release,
7426 };
7427 
7428 static const struct file_operations tracing_mark_fops = {
7429 	.open		= tracing_mark_open,
7430 	.write		= tracing_mark_write,
7431 	.release	= tracing_release_generic_tr,
7432 };
7433 
7434 static const struct file_operations tracing_mark_raw_fops = {
7435 	.open		= tracing_mark_open,
7436 	.write		= tracing_mark_raw_write,
7437 	.release	= tracing_release_generic_tr,
7438 };
7439 
7440 static const struct file_operations trace_clock_fops = {
7441 	.open		= tracing_clock_open,
7442 	.read		= seq_read,
7443 	.llseek		= seq_lseek,
7444 	.release	= tracing_single_release_tr,
7445 	.write		= tracing_clock_write,
7446 };
7447 
7448 static const struct file_operations trace_time_stamp_mode_fops = {
7449 	.open		= tracing_time_stamp_mode_open,
7450 	.read		= seq_read,
7451 	.llseek		= seq_lseek,
7452 	.release	= tracing_single_release_tr,
7453 };
7454 
7455 static const struct file_operations last_boot_fops = {
7456 	.open		= tracing_open_generic_tr,
7457 	.read		= tracing_last_boot_read,
7458 	.llseek		= generic_file_llseek,
7459 	.release	= tracing_release_generic_tr,
7460 };
7461 
7462 #ifdef CONFIG_TRACER_SNAPSHOT
7463 static const struct file_operations snapshot_fops = {
7464 	.open		= tracing_snapshot_open,
7465 	.read		= seq_read,
7466 	.write		= tracing_snapshot_write,
7467 	.llseek		= tracing_lseek,
7468 	.release	= tracing_snapshot_release,
7469 };
7470 
7471 static const struct file_operations snapshot_raw_fops = {
7472 	.open		= snapshot_raw_open,
7473 	.read		= tracing_buffers_read,
7474 	.release	= tracing_buffers_release,
7475 	.splice_read	= tracing_buffers_splice_read,
7476 };
7477 
7478 #endif /* CONFIG_TRACER_SNAPSHOT */
7479 
7480 /*
7481  * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7482  * @filp: The active open file structure
7483  * @ubuf: The userspace provided buffer holding the value to write
7484  * @cnt: The maximum number of bytes to write
7485  * @ppos: The current "file" position
7486  *
7487  * This function implements the write interface for a struct trace_min_max_param.
7488  * The filp->private_data must point to a trace_min_max_param structure that
7489  * defines where to write the value, the min and the max acceptable values,
7490  * and a lock to protect the write.
7491  */
7492 static ssize_t
7493 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7494 {
7495 	struct trace_min_max_param *param = filp->private_data;
7496 	u64 val;
7497 	int err;
7498 
7499 	if (!param)
7500 		return -EFAULT;
7501 
7502 	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7503 	if (err)
7504 		return err;
7505 
7506 	if (param->lock)
7507 		mutex_lock(param->lock);
7508 
7509 	if (param->min && val < *param->min)
7510 		err = -EINVAL;
7511 
7512 	if (param->max && val > *param->max)
7513 		err = -EINVAL;
7514 
7515 	if (!err)
7516 		*param->val = val;
7517 
7518 	if (param->lock)
7519 		mutex_unlock(param->lock);
7520 
7521 	if (err)
7522 		return err;
7523 
7524 	return cnt;
7525 }
7526 
7527 /*
7528  * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7529  * @filp: The active open file structure
7530  * @ubuf: The userspace provided buffer to read value into
7531  * @cnt: The maximum number of bytes to read
7532  * @ppos: The current "file" position
7533  *
7534  * This function implements the read interface for a struct trace_min_max_param.
7535  * The filp->private_data must point to a trace_min_max_param struct with valid
7536  * data.
7537  */
7538 static ssize_t
7539 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7540 {
7541 	struct trace_min_max_param *param = filp->private_data;
7542 	char buf[U64_STR_SIZE];
7543 	int len;
7544 	u64 val;
7545 
7546 	if (!param)
7547 		return -EFAULT;
7548 
7549 	val = *param->val;
7550 
7551 	if (cnt > sizeof(buf))
7552 		cnt = sizeof(buf);
7553 
7554 	len = snprintf(buf, sizeof(buf), "%llu\n", val);
7555 
7556 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7557 }
7558 
7559 const struct file_operations trace_min_max_fops = {
7560 	.open		= tracing_open_generic,
7561 	.read		= trace_min_max_read,
7562 	.write		= trace_min_max_write,
7563 };
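
/*
 * A minimal sketch of a trace_min_max_fops user; the names my_val,
 * my_min, my_max, my_lock and the "my_tunable" file are illustrative
 * only, not an existing interface:
 *
 *	static u64 my_val, my_min = 1, my_max = 1000;
 *	static DEFINE_MUTEX(my_lock);
 *
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_tunable", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 *
 * A write outside [1, 1000] then fails with -EINVAL, while a read
 * returns the current value followed by a newline.
 */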
7564 
7565 #define TRACING_LOG_ERRS_MAX	8
7566 #define TRACING_LOG_LOC_MAX	128
7567 
7568 #define CMD_PREFIX "  Command: "
7569 
7570 struct err_info {
7571 	const char	**errs;	/* ptr to loc-specific array of err strings */
7572 	u8		type;	/* index into errs -> specific err string */
7573 	u16		pos;	/* caret position */
7574 	u64		ts;
7575 };
7576 
7577 struct tracing_log_err {
7578 	struct list_head	list;
7579 	struct err_info		info;
7580 	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
7581 	char			*cmd;                     /* what caused err */
7582 };
7583 
7584 static DEFINE_MUTEX(tracing_err_log_lock);
7585 
7586 static struct tracing_log_err *alloc_tracing_log_err(int len)
7587 {
7588 	struct tracing_log_err *err;
7589 
7590 	err = kzalloc(sizeof(*err), GFP_KERNEL);
7591 	if (!err)
7592 		return ERR_PTR(-ENOMEM);
7593 
7594 	err->cmd = kzalloc(len, GFP_KERNEL);
7595 	if (!err->cmd) {
7596 		kfree(err);
7597 		return ERR_PTR(-ENOMEM);
7598 	}
7599 
7600 	return err;
7601 }
7602 
7603 static void free_tracing_log_err(struct tracing_log_err *err)
7604 {
7605 	kfree(err->cmd);
7606 	kfree(err);
7607 }
7608 
7609 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7610 						   int len)
7611 {
7612 	struct tracing_log_err *err;
7613 	char *cmd;
7614 
7615 	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7616 		err = alloc_tracing_log_err(len);
7617 		if (PTR_ERR(err) != -ENOMEM)
7618 			tr->n_err_log_entries++;
7619 
7620 		return err;
7621 	}
7622 	cmd = kzalloc(len, GFP_KERNEL);
7623 	if (!cmd)
7624 		return ERR_PTR(-ENOMEM);
7625 	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7626 	kfree(err->cmd);
7627 	err->cmd = cmd;
7628 	list_del(&err->list);
7629 
7630 	return err;
7631 }
7632 
7633 /**
7634  * err_pos - find the position of a string within a command for error careting
7635  * @cmd: The tracing command that caused the error
7636  * @str: The string to position the caret at within @cmd
7637  *
7638  * Finds the position of the first occurrence of @str within @cmd.  The
7639  * return value can be passed to tracing_log_err() for caret placement
7640  * within @cmd.
7641  *
7642  * Returns the index within @cmd of the first occurrence of @str or 0
7643  * if @str was not found.
7644  */
7645 unsigned int err_pos(char *cmd, const char *str)
7646 {
7647 	char *found;
7648 
7649 	if (WARN_ON(!strlen(cmd)))
7650 		return 0;
7651 
7652 	found = strstr(cmd, str);
7653 	if (found)
7654 		return found - cmd;
7655 
7656 	return 0;
7657 }
7658 
7659 /**
7660  * tracing_log_err - write an error to the tracing error log
7661  * @tr: The associated trace array for the error (NULL for top level array)
7662  * @loc: A string describing where the error occurred
7663  * @cmd: The tracing command that caused the error
7664  * @errs: The array of loc-specific static error strings
7665  * @type: The index into errs[], which produces the specific static err string
7666  * @pos: The position the caret should be placed in the cmd
7667  *
7668  * Writes an error into tracing/error_log of the form:
7669  *
7670  * <loc>: error: <text>
7671  *   Command: <cmd>
7672  *              ^
7673  *
7674  * tracing/error_log is a small log file containing the last
7675  * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
7676  * unless there has been a tracing error, and the error log can be
7677  * cleared and have its memory freed by writing the empty string in
7678  * truncation mode to it, i.e. echo > tracing/error_log.
7679  *
7680  * NOTE: the @errs array along with the @type param are used to
7681  * produce a static error string - this string is not copied and saved
7682  * when the error is logged - only a pointer to it is saved.  See
7683  * existing callers for examples of how static strings are typically
7684  * defined for use with tracing_log_err().
7685  */
7686 void tracing_log_err(struct trace_array *tr,
7687 		     const char *loc, const char *cmd,
7688 		     const char **errs, u8 type, u16 pos)
7689 {
7690 	struct tracing_log_err *err;
7691 	int len = 0;
7692 
7693 	if (!tr)
7694 		tr = &global_trace;
7695 
7696 	len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
7697 
7698 	guard(mutex)(&tracing_err_log_lock);
7699 
7700 	err = get_tracing_log_err(tr, len);
7701 	if (PTR_ERR(err) == -ENOMEM)
7702 		return;
7703 
7704 	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7705 	snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
7706 
7707 	err->info.errs = errs;
7708 	err->info.type = type;
7709 	err->info.pos = pos;
7710 	err->info.ts = local_clock();
7711 
7712 	list_add_tail(&err->list, &tr->err_log);
7713 }
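
/*
 * Sketch of a typical caller (the location string, command and error
 * table below are illustrative, modeled on existing users):
 *
 *	static const char *err_text[] = { "Colon missing", "Too many args" };
 *
 *	tracing_log_err(tr, "trace_kprobe", cmd, err_text,
 *			0, err_pos(cmd, ":"));
 *
 * logs "trace_kprobe: error: Colon missing" followed by @cmd on its
 * own line, with the caret placed under the first ':' of @cmd.
 */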
7714 
7715 static void clear_tracing_err_log(struct trace_array *tr)
7716 {
7717 	struct tracing_log_err *err, *next;
7718 
7719 	mutex_lock(&tracing_err_log_lock);
7720 	list_for_each_entry_safe(err, next, &tr->err_log, list) {
7721 		list_del(&err->list);
7722 		free_tracing_log_err(err);
7723 	}
7724 
7725 	tr->n_err_log_entries = 0;
7726 	mutex_unlock(&tracing_err_log_lock);
7727 }
7728 
7729 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7730 {
7731 	struct trace_array *tr = m->private;
7732 
7733 	mutex_lock(&tracing_err_log_lock);
7734 
7735 	return seq_list_start(&tr->err_log, *pos);
7736 }
7737 
7738 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7739 {
7740 	struct trace_array *tr = m->private;
7741 
7742 	return seq_list_next(v, &tr->err_log, pos);
7743 }
7744 
7745 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7746 {
7747 	mutex_unlock(&tracing_err_log_lock);
7748 }
7749 
7750 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
7751 {
7752 	u16 i;
7753 
7754 	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7755 		seq_putc(m, ' ');
7756 	for (i = 0; i < pos; i++)
7757 		seq_putc(m, ' ');
7758 	seq_puts(m, "^\n");
7759 }
7760 
7761 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7762 {
7763 	struct tracing_log_err *err = v;
7764 
7765 	if (err) {
7766 		const char *err_text = err->info.errs[err->info.type];
7767 		u64 sec = err->info.ts;
7768 		u32 nsec;
7769 
7770 		nsec = do_div(sec, NSEC_PER_SEC);
7771 		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7772 			   err->loc, err_text);
7773 		seq_printf(m, "%s", err->cmd);
7774 		tracing_err_log_show_pos(m, err->info.pos);
7775 	}
7776 
7777 	return 0;
7778 }
7779 
7780 static const struct seq_operations tracing_err_log_seq_ops = {
7781 	.start  = tracing_err_log_seq_start,
7782 	.next   = tracing_err_log_seq_next,
7783 	.stop   = tracing_err_log_seq_stop,
7784 	.show   = tracing_err_log_seq_show
7785 };
7786 
7787 static int tracing_err_log_open(struct inode *inode, struct file *file)
7788 {
7789 	struct trace_array *tr = inode->i_private;
7790 	int ret = 0;
7791 
7792 	ret = tracing_check_open_get_tr(tr);
7793 	if (ret)
7794 		return ret;
7795 
7796 	/* If this file was opened for write, then erase contents */
7797 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7798 		clear_tracing_err_log(tr);
7799 
7800 	if (file->f_mode & FMODE_READ) {
7801 		ret = seq_open(file, &tracing_err_log_seq_ops);
7802 		if (!ret) {
7803 			struct seq_file *m = file->private_data;
7804 			m->private = tr;
7805 		} else {
7806 			trace_array_put(tr);
7807 		}
7808 	}
7809 	return ret;
7810 }
7811 
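/*
 * Writes to error_log are accepted but discarded, so that e.g.
 * "echo > error_log" succeeds; the actual clearing is done by the
 * O_TRUNC handling in tracing_err_log_open() above.
 */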
7812 static ssize_t tracing_err_log_write(struct file *file,
7813 				     const char __user *buffer,
7814 				     size_t count, loff_t *ppos)
7815 {
7816 	return count;
7817 }
7818 
7819 static int tracing_err_log_release(struct inode *inode, struct file *file)
7820 {
7821 	struct trace_array *tr = inode->i_private;
7822 
7823 	trace_array_put(tr);
7824 
7825 	if (file->f_mode & FMODE_READ)
7826 		seq_release(inode, file);
7827 
7828 	return 0;
7829 }
7830 
7831 static const struct file_operations tracing_err_log_fops = {
7832 	.open           = tracing_err_log_open,
7833 	.write		= tracing_err_log_write,
7834 	.read           = seq_read,
7835 	.llseek         = tracing_lseek,
7836 	.release        = tracing_err_log_release,
7837 };
7838 
7839 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7840 {
7841 	struct trace_array *tr = inode->i_private;
7842 	struct ftrace_buffer_info *info;
7843 	int ret;
7844 
7845 	ret = tracing_check_open_get_tr(tr);
7846 	if (ret)
7847 		return ret;
7848 
7849 	info = kvzalloc(sizeof(*info), GFP_KERNEL);
7850 	if (!info) {
7851 		trace_array_put(tr);
7852 		return -ENOMEM;
7853 	}
7854 
7855 	mutex_lock(&trace_types_lock);
7856 
7857 	info->iter.tr		= tr;
7858 	info->iter.cpu_file	= tracing_get_cpu(inode);
7859 	info->iter.trace	= tr->current_trace;
7860 	info->iter.array_buffer = &tr->array_buffer;
7861 	info->spare		= NULL;
7862 	/* Force reading ring buffer for first read */
7863 	info->read		= (unsigned int)-1;
7864 
7865 	filp->private_data = info;
7866 
7867 	tr->trace_ref++;
7868 
7869 	mutex_unlock(&trace_types_lock);
7870 
7871 	ret = nonseekable_open(inode, filp);
7872 	if (ret < 0)
7873 		trace_array_put(tr);
7874 
7875 	return ret;
7876 }
7877 
7878 static __poll_t
7879 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7880 {
7881 	struct ftrace_buffer_info *info = filp->private_data;
7882 	struct trace_iterator *iter = &info->iter;
7883 
7884 	return trace_poll(iter, filp, poll_table);
7885 }
7886 
7887 static ssize_t
7888 tracing_buffers_read(struct file *filp, char __user *ubuf,
7889 		     size_t count, loff_t *ppos)
7890 {
7891 	struct ftrace_buffer_info *info = filp->private_data;
7892 	struct trace_iterator *iter = &info->iter;
7893 	void *trace_data;
7894 	int page_size;
7895 	ssize_t ret = 0;
7896 	ssize_t size;
7897 
7898 	if (!count)
7899 		return 0;
7900 
7901 #ifdef CONFIG_TRACER_MAX_TRACE
7902 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7903 		return -EBUSY;
7904 #endif
7905 
7906 	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
7907 
7908 	/* Make sure the spare matches the current sub buffer size */
7909 	if (info->spare) {
7910 		if (page_size != info->spare_size) {
7911 			ring_buffer_free_read_page(iter->array_buffer->buffer,
7912 						   info->spare_cpu, info->spare);
7913 			info->spare = NULL;
7914 		}
7915 	}
7916 
7917 	if (!info->spare) {
7918 		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7919 							  iter->cpu_file);
7920 		if (IS_ERR(info->spare)) {
7921 			ret = PTR_ERR(info->spare);
7922 			info->spare = NULL;
7923 		} else {
7924 			info->spare_cpu = iter->cpu_file;
7925 			info->spare_size = page_size;
7926 		}
7927 	}
7928 	if (!info->spare)
7929 		return ret;
7930 
7931 	/* Do we have previous read data to read? */
7932 	if (info->read < page_size)
7933 		goto read;
7934 
7935  again:
7936 	trace_access_lock(iter->cpu_file);
7937 	ret = ring_buffer_read_page(iter->array_buffer->buffer,
7938 				    info->spare,
7939 				    count,
7940 				    iter->cpu_file, 0);
7941 	trace_access_unlock(iter->cpu_file);
7942 
7943 	if (ret < 0) {
7944 		if (trace_empty(iter) && !iter->closed) {
7945 			if ((filp->f_flags & O_NONBLOCK))
7946 				return -EAGAIN;
7947 
7948 			ret = wait_on_pipe(iter, 0);
7949 			if (ret)
7950 				return ret;
7951 
7952 			goto again;
7953 		}
7954 		return 0;
7955 	}
7956 
7957 	info->read = 0;
7958  read:
7959 	size = page_size - info->read;
7960 	if (size > count)
7961 		size = count;
7962 	trace_data = ring_buffer_read_page_data(info->spare);
7963 	ret = copy_to_user(ubuf, trace_data + info->read, size);
7964 	if (ret == size)
7965 		return -EFAULT;
7966 
7967 	size -= ret;
7968 
7969 	*ppos += size;
7970 	info->read += size;
7971 
7972 	return size;
7973 }
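
/*
 * A minimal (hypothetical) user-space consumer of this read path via
 * the per_cpu/cpuN/trace_pipe_raw file; each successful read() returns
 * data from one ring-buffer sub-buffer:
 *
 *	int fd = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	char page[4096];	// assumes a 4K sub-buffer size
 *	ssize_t r = read(fd, page, sizeof(page));
 *
 * With O_NONBLOCK the read returns -EAGAIN instead of sleeping in
 * wait_on_pipe() when the buffer is empty.
 */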
7974 
7975 static int tracing_buffers_flush(struct file *file, fl_owner_t id)
7976 {
7977 	struct ftrace_buffer_info *info = file->private_data;
7978 	struct trace_iterator *iter = &info->iter;
7979 
7980 	iter->closed = true;
7981 	/* Make sure the waiters see the new wait_index */
7982 	(void)atomic_fetch_inc_release(&iter->wait_index);
7983 
7984 	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
7985 
7986 	return 0;
7987 }
7988 
7989 static int tracing_buffers_release(struct inode *inode, struct file *file)
7990 {
7991 	struct ftrace_buffer_info *info = file->private_data;
7992 	struct trace_iterator *iter = &info->iter;
7993 
7994 	mutex_lock(&trace_types_lock);
7995 
7996 	iter->tr->trace_ref--;
7997 
7998 	__trace_array_put(iter->tr);
7999 
8000 	if (info->spare)
8001 		ring_buffer_free_read_page(iter->array_buffer->buffer,
8002 					   info->spare_cpu, info->spare);
8003 	kvfree(info);
8004 
8005 	mutex_unlock(&trace_types_lock);
8006 
8007 	return 0;
8008 }
8009 
8010 struct buffer_ref {
8011 	struct trace_buffer	*buffer;
8012 	void			*page;
8013 	int			cpu;
8014 	refcount_t		refcount;
8015 };
8016 
8017 static void buffer_ref_release(struct buffer_ref *ref)
8018 {
8019 	if (!refcount_dec_and_test(&ref->refcount))
8020 		return;
8021 	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8022 	kfree(ref);
8023 }
8024 
8025 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8026 				    struct pipe_buffer *buf)
8027 {
8028 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8029 
8030 	buffer_ref_release(ref);
8031 	buf->private = 0;
8032 }
8033 
8034 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8035 				struct pipe_buffer *buf)
8036 {
8037 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8038 
8039 	if (refcount_read(&ref->refcount) > INT_MAX/2)
8040 		return false;
8041 
8042 	refcount_inc(&ref->refcount);
8043 	return true;
8044 }
8045 
8046 /* Pipe buffer operations for a buffer. */
8047 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8048 	.release		= buffer_pipe_buf_release,
8049 	.get			= buffer_pipe_buf_get,
8050 };
8051 
8052 /*
8053  * Callback from splice_to_pipe(), if we need to release some pages
8054  * at the end of the spd in case we errored out while filling the pipe.
8055  */
8056 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8057 {
8058 	struct buffer_ref *ref =
8059 		(struct buffer_ref *)spd->partial[i].private;
8060 
8061 	buffer_ref_release(ref);
8062 	spd->partial[i].private = 0;
8063 }
8064 
8065 static ssize_t
8066 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8067 			    struct pipe_inode_info *pipe, size_t len,
8068 			    unsigned int flags)
8069 {
8070 	struct ftrace_buffer_info *info = file->private_data;
8071 	struct trace_iterator *iter = &info->iter;
8072 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
8073 	struct page *pages_def[PIPE_DEF_BUFFERS];
8074 	struct splice_pipe_desc spd = {
8075 		.pages		= pages_def,
8076 		.partial	= partial_def,
8077 		.nr_pages_max	= PIPE_DEF_BUFFERS,
8078 		.ops		= &buffer_pipe_buf_ops,
8079 		.spd_release	= buffer_spd_release,
8080 	};
8081 	struct buffer_ref *ref;
8082 	bool woken = false;
8083 	int page_size;
8084 	int entries, i;
8085 	ssize_t ret = 0;
8086 
8087 #ifdef CONFIG_TRACER_MAX_TRACE
8088 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8089 		return -EBUSY;
8090 #endif
8091 
8092 	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
8093 	if (*ppos & (page_size - 1))
8094 		return -EINVAL;
8095 
8096 	if (len & (page_size - 1)) {
8097 		if (len < page_size)
8098 			return -EINVAL;
8099 		len &= (~(page_size - 1));
8100 	}
8101 
8102 	if (splice_grow_spd(pipe, &spd))
8103 		return -ENOMEM;
8104 
8105  again:
8106 	trace_access_lock(iter->cpu_file);
8107 	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8108 
8109 	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) {
8110 		struct page *page;
8111 		int r;
8112 
8113 		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8114 		if (!ref) {
8115 			ret = -ENOMEM;
8116 			break;
8117 		}
8118 
8119 		refcount_set(&ref->refcount, 1);
8120 		ref->buffer = iter->array_buffer->buffer;
8121 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8122 		if (IS_ERR(ref->page)) {
8123 			ret = PTR_ERR(ref->page);
8124 			ref->page = NULL;
8125 			kfree(ref);
8126 			break;
8127 		}
8128 		ref->cpu = iter->cpu_file;
8129 
8130 		r = ring_buffer_read_page(ref->buffer, ref->page,
8131 					  len, iter->cpu_file, 1);
8132 		if (r < 0) {
8133 			ring_buffer_free_read_page(ref->buffer, ref->cpu,
8134 						   ref->page);
8135 			kfree(ref);
8136 			break;
8137 		}
8138 
8139 		page = virt_to_page(ring_buffer_read_page_data(ref->page));
8140 
8141 		spd.pages[i] = page;
8142 		spd.partial[i].len = page_size;
8143 		spd.partial[i].offset = 0;
8144 		spd.partial[i].private = (unsigned long)ref;
8145 		spd.nr_pages++;
8146 		*ppos += page_size;
8147 
8148 		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8149 	}
8150 
8151 	trace_access_unlock(iter->cpu_file);
8152 	spd.nr_pages = i;
8153 
8154 	/* did we read anything? */
8155 	if (!spd.nr_pages) {
8157 		if (ret)
8158 			goto out;
8159 
8160 		if (woken)
8161 			goto out;
8162 
8163 		ret = -EAGAIN;
8164 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8165 			goto out;
8166 
8167 		ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
8168 		if (ret)
8169 			goto out;
8170 
8171 		/* No need to wait after waking up when tracing is off */
8172 		if (!tracer_tracing_is_on(iter->tr))
8173 			goto out;
8174 
8175 		/* Iterate one more time to collect any new data then exit */
8176 		woken = true;
8177 
8178 		goto again;
8179 	}
8180 
8181 	ret = splice_to_pipe(pipe, &spd);
8182 out:
8183 	splice_shrink_spd(&spd);
8184 
8185 	return ret;
8186 }
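
/*
 * Consumers such as trace-cmd typically splice whole sub-buffers out
 * of trace_pipe_raw instead of copying them, e.g. (illustrative, with
 * pipe_fd being the write end of a pipe):
 *
 *	splice(raw_fd, NULL, pipe_fd, NULL, page_size, SPLICE_F_NONBLOCK);
 *
 * The file position must be sub-buffer aligned and the length must
 * cover at least one sub-buffer, as enforced above; unaligned lengths
 * are rounded down.
 */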
8187 
8188 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8189 {
8190 	struct ftrace_buffer_info *info = file->private_data;
8191 	struct trace_iterator *iter = &info->iter;
8192 	int err;
8193 
8194 	if (cmd == TRACE_MMAP_IOCTL_GET_READER) {
8195 		if (!(file->f_flags & O_NONBLOCK)) {
8196 			err = ring_buffer_wait(iter->array_buffer->buffer,
8197 					       iter->cpu_file,
8198 					       iter->tr->buffer_percent,
8199 					       NULL, NULL);
8200 			if (err)
8201 				return err;
8202 		}
8203 
8204 		return ring_buffer_map_get_reader(iter->array_buffer->buffer,
8205 						  iter->cpu_file);
8206 	} else if (cmd) {
8207 		return -ENOTTY;
8208 	}
8209 
8210 	/*
8211 	 * An ioctl call with cmd 0 to the ring buffer file will wake up all
8212 	 * waiters
8213 	 */
8214 	mutex_lock(&trace_types_lock);
8215 
8216 	/* Make sure the waiters see the new wait_index */
8217 	(void)atomic_fetch_inc_release(&iter->wait_index);
8218 
8219 	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8220 
8221 	mutex_unlock(&trace_types_lock);
8222 	return 0;
8223 }
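
/*
 * A sketch of the memory-mapped reader path from user space (see
 * Documentation/trace/ring-buffer-map.rst; error handling omitted):
 *
 *	int fd = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	meta = mmap(NULL, meta_len, PROT_READ, MAP_SHARED, fd, 0);
 *	ioctl(fd, TRACE_MMAP_IOCTL_GET_READER);	// swap in a fresh reader page
 *
 * Without O_NONBLOCK the ioctl first waits in ring_buffer_wait()
 * until buffer_percent of the buffer is filled.
 */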
8224 
8225 #ifdef CONFIG_TRACER_MAX_TRACE
8226 static int get_snapshot_map(struct trace_array *tr)
8227 {
8228 	int err = 0;
8229 
8230 	/*
8231 	 * Called with mmap_lock held. lockdep would be unhappy if we would now
8232 	 * take trace_types_lock. Instead use the specific
8233 	 * snapshot_trigger_lock.
8234 	 */
8235 	spin_lock(&tr->snapshot_trigger_lock);
8236 
8237 	if (tr->snapshot || tr->mapped == UINT_MAX)
8238 		err = -EBUSY;
8239 	else
8240 		tr->mapped++;
8241 
8242 	spin_unlock(&tr->snapshot_trigger_lock);
8243 
8244 	/* Wait for update_max_tr() to observe iter->tr->mapped */
8245 	if (tr->mapped == 1)
8246 		synchronize_rcu();
8247 
8248 	return err;
8249 }
8250 
8251 static void put_snapshot_map(struct trace_array *tr)
8252 {
8253 	spin_lock(&tr->snapshot_trigger_lock);
8254 	if (!WARN_ON(!tr->mapped))
8255 		tr->mapped--;
8256 	spin_unlock(&tr->snapshot_trigger_lock);
8257 }
8258 #else
8259 static inline int get_snapshot_map(struct trace_array *tr) { return 0; }
8260 static inline void put_snapshot_map(struct trace_array *tr) { }
8261 #endif
8262 
8263 static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
8264 {
8265 	struct ftrace_buffer_info *info = vma->vm_file->private_data;
8266 	struct trace_iterator *iter = &info->iter;
8267 
8268 	WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file));
8269 	put_snapshot_map(iter->tr);
8270 }
8271 
8272 static const struct vm_operations_struct tracing_buffers_vmops = {
8273 	.close		= tracing_buffers_mmap_close,
8274 };
8275 
8276 static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
8277 {
8278 	struct ftrace_buffer_info *info = filp->private_data;
8279 	struct trace_iterator *iter = &info->iter;
8280 	int ret = 0;
8281 
8282 	ret = get_snapshot_map(iter->tr);
8283 	if (ret)
8284 		return ret;
8285 
8286 	ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma);
8287 	if (ret)
8288 		put_snapshot_map(iter->tr);
8289 
8290 	vma->vm_ops = &tracing_buffers_vmops;
8291 
8292 	return ret;
8293 }
8294 
8295 static const struct file_operations tracing_buffers_fops = {
8296 	.open		= tracing_buffers_open,
8297 	.read		= tracing_buffers_read,
8298 	.poll		= tracing_buffers_poll,
8299 	.release	= tracing_buffers_release,
8300 	.flush		= tracing_buffers_flush,
8301 	.splice_read	= tracing_buffers_splice_read,
8302 	.unlocked_ioctl = tracing_buffers_ioctl,
8303 	.mmap		= tracing_buffers_mmap,
8304 };
8305 
8306 static ssize_t
8307 tracing_stats_read(struct file *filp, char __user *ubuf,
8308 		   size_t count, loff_t *ppos)
8309 {
8310 	struct inode *inode = file_inode(filp);
8311 	struct trace_array *tr = inode->i_private;
8312 	struct array_buffer *trace_buf = &tr->array_buffer;
8313 	int cpu = tracing_get_cpu(inode);
8314 	struct trace_seq *s;
8315 	unsigned long cnt;
8316 	unsigned long long t;
8317 	unsigned long usec_rem;
8318 
8319 	s = kmalloc(sizeof(*s), GFP_KERNEL);
8320 	if (!s)
8321 		return -ENOMEM;
8322 
8323 	trace_seq_init(s);
8324 
8325 	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8326 	trace_seq_printf(s, "entries: %ld\n", cnt);
8327 
8328 	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8329 	trace_seq_printf(s, "overrun: %ld\n", cnt);
8330 
8331 	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8332 	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8333 
8334 	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8335 	trace_seq_printf(s, "bytes: %ld\n", cnt);
8336 
8337 	if (trace_clocks[tr->clock_id].in_ns) {
8338 		/* local or global for trace_clock */
8339 		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8340 		usec_rem = do_div(t, USEC_PER_SEC);
8341 		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8342 								t, usec_rem);
8343 
8344 		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8345 		usec_rem = do_div(t, USEC_PER_SEC);
8346 		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8347 	} else {
8348 		/* counter or tsc mode for trace_clock */
8349 		trace_seq_printf(s, "oldest event ts: %llu\n",
8350 				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8351 
8352 		trace_seq_printf(s, "now ts: %llu\n",
8353 				ring_buffer_time_stamp(trace_buf->buffer));
8354 	}
8355 
8356 	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8357 	trace_seq_printf(s, "dropped events: %ld\n", cnt);
8358 
8359 	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8360 	trace_seq_printf(s, "read events: %ld\n", cnt);
8361 
8362 	count = simple_read_from_buffer(ubuf, count, ppos,
8363 					s->buffer, trace_seq_used(s));
8364 
8365 	kfree(s);
8366 
8367 	return count;
8368 }
8369 
8370 static const struct file_operations tracing_stats_fops = {
8371 	.open		= tracing_open_generic_tr,
8372 	.read		= tracing_stats_read,
8373 	.llseek		= generic_file_llseek,
8374 	.release	= tracing_release_generic_tr,
8375 };
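
/*
 * Sample per_cpu/cpuN/stats output in nanosecond clock mode (values
 * illustrative):
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 5248
 *	oldest event ts:  2453.139715
 *	now ts:  2453.750439
 *	dropped events: 0
 *	read events: 129
 */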
8376 
8377 #ifdef CONFIG_DYNAMIC_FTRACE
8378 
8379 static ssize_t
8380 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8381 		  size_t cnt, loff_t *ppos)
8382 {
8383 	ssize_t ret;
8384 	char *buf;
8385 	int r;
8386 
8387 	/* 512 should be plenty to hold the amount needed */
8388 #define DYN_INFO_BUF_SIZE	512
8389 
8390 	buf = kmalloc(DYN_INFO_BUF_SIZE, GFP_KERNEL);
8391 	if (!buf)
8392 		return -ENOMEM;
8393 
8394 	r = scnprintf(buf, DYN_INFO_BUF_SIZE,
8395 		      "%ld pages:%ld groups: %ld\n"
8396 		      "ftrace boot update time = %llu (ns)\n"
8397 		      "ftrace module total update time = %llu (ns)\n",
8398 		      ftrace_update_tot_cnt,
8399 		      ftrace_number_of_pages,
8400 		      ftrace_number_of_groups,
8401 		      ftrace_update_time,
8402 		      ftrace_total_mod_time);
8403 
8404 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8405 	kfree(buf);
8406 	return ret;
8407 }
8408 
8409 static const struct file_operations tracing_dyn_info_fops = {
8410 	.open		= tracing_open_generic,
8411 	.read		= tracing_read_dyn_info,
8412 	.llseek		= generic_file_llseek,
8413 };
8414 #endif /* CONFIG_DYNAMIC_FTRACE */
8415 
8416 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8417 static void
8418 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8419 		struct trace_array *tr, struct ftrace_probe_ops *ops,
8420 		void *data)
8421 {
8422 	tracing_snapshot_instance(tr);
8423 }
8424 
8425 static void
8426 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8427 		      struct trace_array *tr, struct ftrace_probe_ops *ops,
8428 		      void *data)
8429 {
8430 	struct ftrace_func_mapper *mapper = data;
8431 	long *count = NULL;
8432 
8433 	if (mapper)
8434 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8435 
8436 	if (count) {
8438 		if (*count <= 0)
8439 			return;
8440 
8441 		(*count)--;
8442 	}
8443 
8444 	tracing_snapshot_instance(tr);
8445 }
8446 
8447 static int
8448 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8449 		      struct ftrace_probe_ops *ops, void *data)
8450 {
8451 	struct ftrace_func_mapper *mapper = data;
8452 	long *count = NULL;
8453 
8454 	seq_printf(m, "%ps:", (void *)ip);
8455 
8456 	seq_puts(m, "snapshot");
8457 
8458 	if (mapper)
8459 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8460 
8461 	if (count)
8462 		seq_printf(m, ":count=%ld\n", *count);
8463 	else
8464 		seq_puts(m, ":unlimited\n");
8465 
8466 	return 0;
8467 }
8468 
8469 static int
8470 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8471 		     unsigned long ip, void *init_data, void **data)
8472 {
8473 	struct ftrace_func_mapper *mapper = *data;
8474 
8475 	if (!mapper) {
8476 		mapper = allocate_ftrace_func_mapper();
8477 		if (!mapper)
8478 			return -ENOMEM;
8479 		*data = mapper;
8480 	}
8481 
8482 	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8483 }
8484 
8485 static void
8486 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8487 		     unsigned long ip, void *data)
8488 {
8489 	struct ftrace_func_mapper *mapper = data;
8490 
8491 	if (!ip) {
8492 		if (!mapper)
8493 			return;
8494 		free_ftrace_func_mapper(mapper, NULL);
8495 		return;
8496 	}
8497 
8498 	ftrace_func_mapper_remove_ip(mapper, ip);
8499 }
8500 
8501 static struct ftrace_probe_ops snapshot_probe_ops = {
8502 	.func			= ftrace_snapshot,
8503 	.print			= ftrace_snapshot_print,
8504 };
8505 
8506 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8507 	.func			= ftrace_count_snapshot,
8508 	.print			= ftrace_snapshot_print,
8509 	.init			= ftrace_snapshot_init,
8510 	.free			= ftrace_snapshot_free,
8511 };
8512 
8513 static int
8514 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8515 			       char *glob, char *cmd, char *param, int enable)
8516 {
8517 	struct ftrace_probe_ops *ops;
8518 	void *count = (void *)-1;
8519 	char *number;
8520 	int ret;
8521 
8522 	if (!tr)
8523 		return -ENODEV;
8524 
8525 	/* hash funcs only work with set_ftrace_filter */
8526 	if (!enable)
8527 		return -EINVAL;
8528 
8529 	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8530 
8531 	if (glob[0] == '!') {
8532 		ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
8533 		if (!ret)
8534 			tracing_disarm_snapshot(tr);
8535 
8536 		return ret;
8537 	}
8538 
8539 	if (!param)
8540 		goto out_reg;
8541 
8542 	number = strsep(&param, ":");
8543 
8544 	if (!strlen(number))
8545 		goto out_reg;
8546 
8547 	/*
8548 	 * We use the callback data field (which is a pointer)
8549 	 * as our counter.
8550 	 */
8551 	ret = kstrtoul(number, 0, (unsigned long *)&count);
8552 	if (ret)
8553 		return ret;
8554 
8555  out_reg:
8556 	ret = tracing_arm_snapshot(tr);
8557 	if (ret < 0)
8558 		goto out;
8559 
8560 	ret = register_ftrace_function_probe(glob, tr, ops, count);
8561 	if (ret < 0)
8562 		tracing_disarm_snapshot(tr);
8563  out:
8564 	return ret < 0 ? ret : 0;
8565 }
8566 
8567 static struct ftrace_func_command ftrace_snapshot_cmd = {
8568 	.name			= "snapshot",
8569 	.func			= ftrace_trace_snapshot_callback,
8570 };
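
/*
 * ftrace_snapshot_cmd wires the "snapshot" command into
 * set_ftrace_filter, e.g.:
 *
 *	# echo 'schedule:snapshot' > set_ftrace_filter
 *	# echo 'schedule:snapshot:5' > set_ftrace_filter	(at most 5 shots)
 *	# echo '!schedule:snapshot' > set_ftrace_filter		(remove the probe)
 *
 * The optional count after the second ':' is parsed by the
 * strsep()/kstrtoul() pair above and decremented per hit in
 * ftrace_count_snapshot().
 */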
8571 
8572 static __init int register_snapshot_cmd(void)
8573 {
8574 	return register_ftrace_command(&ftrace_snapshot_cmd);
8575 }
8576 #else
8577 static inline __init int register_snapshot_cmd(void) { return 0; }
8578 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8579 
8580 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8581 {
8582 	if (WARN_ON(!tr->dir))
8583 		return ERR_PTR(-ENODEV);
8584 
8585 	/* Top directory uses NULL as the parent */
8586 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8587 		return NULL;
8588 
8589 	/* All sub buffers have a descriptor */
8590 	return tr->dir;
8591 }
8592 
8593 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8594 {
8595 	struct dentry *d_tracer;
8596 
8597 	if (tr->percpu_dir)
8598 		return tr->percpu_dir;
8599 
8600 	d_tracer = tracing_get_dentry(tr);
8601 	if (IS_ERR(d_tracer))
8602 		return NULL;
8603 
8604 	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8605 
8606 	MEM_FAIL(!tr->percpu_dir,
8607 		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8608 
8609 	return tr->percpu_dir;
8610 }
8611 
8612 static struct dentry *
8613 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8614 		      void *data, long cpu, const struct file_operations *fops)
8615 {
8616 	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8617 
8618 	if (ret) /* See tracing_get_cpu() */
8619 		d_inode(ret)->i_cdev = (void *)(cpu + 1);
8620 	return ret;
8621 }
8622 
8623 static void
8624 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8625 {
8626 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8627 	struct dentry *d_cpu;
8628 	char cpu_dir[30]; /* 30 characters should be more than enough */
8629 
8630 	if (!d_percpu)
8631 		return;
8632 
8633 	snprintf(cpu_dir, 30, "cpu%ld", cpu);
8634 	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8635 	if (!d_cpu) {
8636 		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8637 		return;
8638 	}
8639 
8640 	/* per cpu trace_pipe */
8641 	trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8642 				tr, cpu, &tracing_pipe_fops);
8643 
8644 	/* per cpu trace */
8645 	trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8646 				tr, cpu, &tracing_fops);
8647 
8648 	trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8649 				tr, cpu, &tracing_buffers_fops);
8650 
8651 	trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8652 				tr, cpu, &tracing_stats_fops);
8653 
8654 	trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8655 				tr, cpu, &tracing_entries_fops);
8656 
8657 	if (tr->range_addr_start)
8658 		trace_create_cpu_file("buffer_meta", TRACE_MODE_READ, d_cpu,
8659 				      tr, cpu, &tracing_buffer_meta_fops);
8660 #ifdef CONFIG_TRACER_SNAPSHOT
8661 	if (!tr->range_addr_start) {
8662 		trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8663 				      tr, cpu, &snapshot_fops);
8664 
8665 		trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8666 				      tr, cpu, &snapshot_raw_fops);
8667 	}
8668 #endif
8669 }
8670 
8671 #ifdef CONFIG_FTRACE_SELFTEST
8672 /* Let selftest have access to static functions in this file */
8673 #include "trace_selftest.c"
8674 #endif
8675 
8676 static ssize_t
8677 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8678 			loff_t *ppos)
8679 {
8680 	struct trace_option_dentry *topt = filp->private_data;
8681 	char *buf;
8682 
8683 	if (topt->flags->val & topt->opt->bit)
8684 		buf = "1\n";
8685 	else
8686 		buf = "0\n";
8687 
8688 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8689 }
8690 
8691 static ssize_t
8692 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8693 			 loff_t *ppos)
8694 {
8695 	struct trace_option_dentry *topt = filp->private_data;
8696 	unsigned long val;
8697 	int ret;
8698 
8699 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8700 	if (ret)
8701 		return ret;
8702 
8703 	if (val != 0 && val != 1)
8704 		return -EINVAL;
8705 
8706 	if (!!(topt->flags->val & topt->opt->bit) != val) {
8707 		mutex_lock(&trace_types_lock);
8708 		ret = __set_tracer_option(topt->tr, topt->flags,
8709 					  topt->opt, !val);
8710 		mutex_unlock(&trace_types_lock);
8711 		if (ret)
8712 			return ret;
8713 	}
8714 
8715 	*ppos += cnt;
8716 
8717 	return cnt;
8718 }
8719 
8720 static int tracing_open_options(struct inode *inode, struct file *filp)
8721 {
8722 	struct trace_option_dentry *topt = inode->i_private;
8723 	int ret;
8724 
8725 	ret = tracing_check_open_get_tr(topt->tr);
8726 	if (ret)
8727 		return ret;
8728 
8729 	filp->private_data = inode->i_private;
8730 	return 0;
8731 }
8732 
8733 static int tracing_release_options(struct inode *inode, struct file *file)
8734 {
8735 	struct trace_option_dentry *topt = file->private_data;
8736 
8737 	trace_array_put(topt->tr);
8738 	return 0;
8739 }
8740 
8741 static const struct file_operations trace_options_fops = {
8742 	.open = tracing_open_options,
8743 	.read = trace_options_read,
8744 	.write = trace_options_write,
8745 	.llseek	= generic_file_llseek,
8746 	.release = tracing_release_options,
8747 };
8748 
8749 /*
8750  * In order to pass in both the trace_array descriptor as well as the index
8751  * to the flag that the trace option file represents, the trace_array
8752  * has a character array of trace_flags_index[], which holds the index
8753  * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8754  * The address of this character array is passed to the flag option file
8755  * read/write callbacks.
8756  *
8757  * In order to extract both the index and the trace_array descriptor,
8758  * get_tr_index() uses the following algorithm.
8759  *
8760  *   idx = *ptr;
8761  *
8762  * Since each element of the index array stores its own index
8763  * (remember index[1] == 1), dereferencing the pointer yields idx.
8764  *
8765  * Then, to get the trace_array descriptor, subtracting that index
8766  * from the ptr gets us to the start of the index array itself.
8767  *
8768  *   ptr - idx == &index[0]
8769  *
8770  * Then a simple container_of() from that pointer gets us to the
8771  * trace_array descriptor.
8772  */
8773 static void get_tr_index(void *data, struct trace_array **ptr,
8774 			 unsigned int *pindex)
8775 {
8776 	*pindex = *(unsigned char *)data;
8777 
8778 	*ptr = container_of(data - *pindex, struct trace_array,
8779 			    trace_flags_index);
8780 }
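
/*
 * Worked example: if @data points at tr->trace_flags_index[3], then
 * *pindex == 3 and data - 3 == &tr->trace_flags_index[0], from which
 * container_of() recovers the enclosing trace_array.
 */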
8781 
8782 static ssize_t
8783 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8784 			loff_t *ppos)
8785 {
8786 	void *tr_index = filp->private_data;
8787 	struct trace_array *tr;
8788 	unsigned int index;
8789 	char *buf;
8790 
8791 	get_tr_index(tr_index, &tr, &index);
8792 
8793 	if (tr->trace_flags & (1 << index))
8794 		buf = "1\n";
8795 	else
8796 		buf = "0\n";
8797 
8798 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8799 }
8800 
8801 static ssize_t
8802 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8803 			 loff_t *ppos)
8804 {
8805 	void *tr_index = filp->private_data;
8806 	struct trace_array *tr;
8807 	unsigned int index;
8808 	unsigned long val;
8809 	int ret;
8810 
8811 	get_tr_index(tr_index, &tr, &index);
8812 
8813 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8814 	if (ret)
8815 		return ret;
8816 
8817 	if (val != 0 && val != 1)
8818 		return -EINVAL;
8819 
8820 	mutex_lock(&event_mutex);
8821 	mutex_lock(&trace_types_lock);
8822 	ret = set_tracer_flag(tr, 1 << index, val);
8823 	mutex_unlock(&trace_types_lock);
8824 	mutex_unlock(&event_mutex);
8825 
8826 	if (ret < 0)
8827 		return ret;
8828 
8829 	*ppos += cnt;
8830 
8831 	return cnt;
8832 }
8833 
8834 static const struct file_operations trace_options_core_fops = {
8835 	.open = tracing_open_generic,
8836 	.read = trace_options_core_read,
8837 	.write = trace_options_core_write,
8838 	.llseek = generic_file_llseek,
8839 };
8840 
8841 struct dentry *trace_create_file(const char *name,
8842 				 umode_t mode,
8843 				 struct dentry *parent,
8844 				 void *data,
8845 				 const struct file_operations *fops)
8846 {
8847 	struct dentry *ret;
8848 
8849 	ret = tracefs_create_file(name, mode, parent, data, fops);
8850 	if (!ret)
8851 		pr_warn("Could not create tracefs '%s' entry\n", name);
8852 
8853 	return ret;
8854 }
8855 
8857 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8858 {
8859 	struct dentry *d_tracer;
8860 
8861 	if (tr->options)
8862 		return tr->options;
8863 
8864 	d_tracer = tracing_get_dentry(tr);
8865 	if (IS_ERR(d_tracer))
8866 		return NULL;
8867 
8868 	tr->options = tracefs_create_dir("options", d_tracer);
8869 	if (!tr->options) {
8870 		pr_warn("Could not create tracefs directory 'options'\n");
8871 		return NULL;
8872 	}
8873 
8874 	return tr->options;
8875 }
8876 
8877 static void
8878 create_trace_option_file(struct trace_array *tr,
8879 			 struct trace_option_dentry *topt,
8880 			 struct tracer_flags *flags,
8881 			 struct tracer_opt *opt)
8882 {
8883 	struct dentry *t_options;
8884 
8885 	t_options = trace_options_init_dentry(tr);
8886 	if (!t_options)
8887 		return;
8888 
8889 	topt->flags = flags;
8890 	topt->opt = opt;
8891 	topt->tr = tr;
8892 
8893 	topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8894 					t_options, topt, &trace_options_fops);
8896 }
8897 
8898 static void
8899 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8900 {
8901 	struct trace_option_dentry *topts;
8902 	struct trace_options *tr_topts;
8903 	struct tracer_flags *flags;
8904 	struct tracer_opt *opts;
8905 	int cnt;
8906 	int i;
8907 
8908 	if (!tracer)
8909 		return;
8910 
8911 	flags = tracer->flags;
8912 
8913 	if (!flags || !flags->opts)
8914 		return;
8915 
8916 	/*
8917 	 * If this is an instance, only create flags for tracers
8918 	 * the instance may have.
8919 	 */
8920 	if (!trace_ok_for_array(tracer, tr))
8921 		return;
8922 
8923 	for (i = 0; i < tr->nr_topts; i++) {
8924 		/* Make sure there's no duplicate flags. */
8925 		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8926 			return;
8927 	}
8928 
8929 	opts = flags->opts;
8930 
8931 	for (cnt = 0; opts[cnt].name; cnt++)
8932 		;
8933 
8934 	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8935 	if (!topts)
8936 		return;
8937 
8938 	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8939 			    GFP_KERNEL);
8940 	if (!tr_topts) {
8941 		kfree(topts);
8942 		return;
8943 	}
8944 
8945 	tr->topts = tr_topts;
8946 	tr->topts[tr->nr_topts].tracer = tracer;
8947 	tr->topts[tr->nr_topts].topts = topts;
8948 	tr->nr_topts++;
8949 
8950 	for (cnt = 0; opts[cnt].name; cnt++) {
8951 		create_trace_option_file(tr, &topts[cnt], flags,
8952 					 &opts[cnt]);
8953 		MEM_FAIL(topts[cnt].entry == NULL,
8954 			  "Failed to create trace option: %s",
8955 			  opts[cnt].name);
8956 	}
8957 }
8958 
8959 static struct dentry *
8960 create_trace_option_core_file(struct trace_array *tr,
8961 			      const char *option, long index)
8962 {
8963 	struct dentry *t_options;
8964 
8965 	t_options = trace_options_init_dentry(tr);
8966 	if (!t_options)
8967 		return NULL;
8968 
8969 	return trace_create_file(option, TRACE_MODE_WRITE, t_options,
8970 				 (void *)&tr->trace_flags_index[index],
8971 				 &trace_options_core_fops);
8972 }
8973 
8974 static void create_trace_options_dir(struct trace_array *tr)
8975 {
8976 	struct dentry *t_options;
8977 	bool top_level = tr == &global_trace;
8978 	int i;
8979 
8980 	t_options = trace_options_init_dentry(tr);
8981 	if (!t_options)
8982 		return;
8983 
8984 	for (i = 0; trace_options[i]; i++) {
8985 		if (top_level ||
8986 		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8987 			create_trace_option_core_file(tr, trace_options[i], i);
8988 	}
8989 }
8990 
8991 static ssize_t
8992 rb_simple_read(struct file *filp, char __user *ubuf,
8993 	       size_t cnt, loff_t *ppos)
8994 {
8995 	struct trace_array *tr = filp->private_data;
8996 	char buf[64];
8997 	int r;
8998 
8999 	r = tracer_tracing_is_on(tr);
9000 	r = sprintf(buf, "%d\n", r);
9001 
9002 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9003 }
9004 
9005 static ssize_t
9006 rb_simple_write(struct file *filp, const char __user *ubuf,
9007 		size_t cnt, loff_t *ppos)
9008 {
9009 	struct trace_array *tr = filp->private_data;
9010 	struct trace_buffer *buffer = tr->array_buffer.buffer;
9011 	unsigned long val;
9012 	int ret;
9013 
9014 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9015 	if (ret)
9016 		return ret;
9017 
9018 	if (buffer) {
9019 		mutex_lock(&trace_types_lock);
9020 		if (!!val == tracer_tracing_is_on(tr)) {
9021 			val = 0; /* do nothing */
9022 		} else if (val) {
9023 			tracer_tracing_on(tr);
9024 			if (tr->current_trace->start)
9025 				tr->current_trace->start(tr);
9026 		} else {
9027 			tracer_tracing_off(tr);
9028 			if (tr->current_trace->stop)
9029 				tr->current_trace->stop(tr);
9030 			/* Wake up any waiters */
9031 			ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9032 		}
9033 		mutex_unlock(&trace_types_lock);
9034 	}
9035 
9036 	(*ppos)++;
9037 
9038 	return cnt;
9039 }
9040 
9041 static const struct file_operations rb_simple_fops = {
9042 	.open		= tracing_open_generic_tr,
9043 	.read		= rb_simple_read,
9044 	.write		= rb_simple_write,
9045 	.release	= tracing_release_generic_tr,
9046 	.llseek		= default_llseek,
9047 };
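
/*
 * rb_simple_fops backs the per-instance "tracing_on" file, so e.g.
 *
 *	# echo 0 > /sys/kernel/tracing/tracing_on
 *
 * stops recording (and wakes any blocked buffer readers) without
 * freeing or resetting the ring buffer itself.
 */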
9048 
9049 static ssize_t
9050 buffer_percent_read(struct file *filp, char __user *ubuf,
9051 		    size_t cnt, loff_t *ppos)
9052 {
9053 	struct trace_array *tr = filp->private_data;
9054 	char buf[64];
9055 	int r;
9056 
9057 	r = tr->buffer_percent;
9058 	r = sprintf(buf, "%d\n", r);
9059 
9060 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9061 }
9062 
9063 static ssize_t
9064 buffer_percent_write(struct file *filp, const char __user *ubuf,
9065 		     size_t cnt, loff_t *ppos)
9066 {
9067 	struct trace_array *tr = filp->private_data;
9068 	unsigned long val;
9069 	int ret;
9070 
9071 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9072 	if (ret)
9073 		return ret;
9074 
9075 	if (val > 100)
9076 		return -EINVAL;
9077 
9078 	tr->buffer_percent = val;
9079 
9080 	(*ppos)++;
9081 
9082 	return cnt;
9083 }
9084 
9085 static const struct file_operations buffer_percent_fops = {
9086 	.open		= tracing_open_generic_tr,
9087 	.read		= buffer_percent_read,
9088 	.write		= buffer_percent_write,
9089 	.release	= tracing_release_generic_tr,
9090 	.llseek		= default_llseek,
9091 };
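
/*
 * buffer_percent controls how full the ring buffer must be before
 * blocked readers are woken: 0 wakes up on any data, 100 only once
 * the buffer is full.  For example:
 *
 *	# echo 50 > /sys/kernel/tracing/buffer_percent
 *
 * makes wait_on_pipe() in tracing_buffers_splice_read() block until
 * the buffer is at least half full.
 */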
9092 
9093 static ssize_t
9094 buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
9095 {
9096 	struct trace_array *tr = filp->private_data;
9097 	size_t size;
9098 	char buf[64];
9099 	int order;
9100 	int r;
9101 
9102 	order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9103 	size = (PAGE_SIZE << order) / 1024;
9104 
9105 	r = sprintf(buf, "%zd\n", size);
9106 
9107 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9108 }
9109 
9110 static ssize_t
9111 buffer_subbuf_size_write(struct file *filp, const char __user *ubuf,
9112 			 size_t cnt, loff_t *ppos)
9113 {
9114 	struct trace_array *tr = filp->private_data;
9115 	unsigned long val;
9116 	int old_order;
9117 	int order;
9118 	int pages;
9119 	int ret;
9120 
9121 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9122 	if (ret)
9123 		return ret;
9124 
9125 	val *= 1024; /* value passed in is in KB */
9126 
9127 	pages = DIV_ROUND_UP(val, PAGE_SIZE);
9128 	order = fls(pages - 1);
9129 
9130 	/* limit between 1 and 128 system pages */
9131 	if (order < 0 || order > 7)
9132 		return -EINVAL;
9133 
9134 	/* Do not allow tracing while changing the order of the ring buffer */
9135 	tracing_stop_tr(tr);
9136 
9137 	old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9138 	if (old_order == order)
9139 		goto out;
9140 
9141 	ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
9142 	if (ret)
9143 		goto out;
9144 
9145 #ifdef CONFIG_TRACER_MAX_TRACE
9146 
9147 	if (!tr->allocated_snapshot)
9148 		goto out_max;
9149 
9150 	ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
9151 	if (ret) {
9152 		/* Put back the old order */
9153 		cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
9154 		if (WARN_ON_ONCE(cnt)) {
9155 			/*
9156 			 * AARGH! We are left with different orders!
9157 			 * The max buffer is our "snapshot" buffer.
9158 			 * When a tracer needs a snapshot (one of the
9159 			 * latency tracers), it swaps the max buffer
9160 			 * with the saved snapshot. We succeeded in
9161 			 * updating the order of the main buffer, but
9162 			 * failed to update the order of the max buffer.
9163 			 * Then, when we tried to reset the main buffer
9164 			 * to the original order, we failed there too.
9165 			 * This is very unlikely to happen, but if it
9166 			 * does, warn and kill all tracing.
9167 			 */
9168 			tracing_disabled = 1;
9169 		}
9170 		goto out;
9171 	}
9172  out_max:
9173 #endif
9174 	(*ppos)++;
9175  out:
9176 	if (ret)
9177 		cnt = ret;
9178 	tracing_start_tr(tr);
9179 	return cnt;
9180 }
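
/*
 * Worked example for the order calculation above, assuming 4K pages:
 * writing "8" gives val = 8192, pages = 2, order = fls(1) = 1, i.e. a
 * two-page (8 KB) sub-buffer.  Anything above 512 KB would need an
 * order greater than 7 (more than 128 pages) and is rejected with
 * -EINVAL.
 */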
9181 
9182 static const struct file_operations buffer_subbuf_size_fops = {
9183 	.open		= tracing_open_generic_tr,
9184 	.read		= buffer_subbuf_size_read,
9185 	.write		= buffer_subbuf_size_write,
9186 	.release	= tracing_release_generic_tr,
9187 	.llseek		= default_llseek,
9188 };
9189 
9190 static struct dentry *trace_instance_dir;
9191 
9192 static void
9193 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9194 
9195 static int
9196 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9197 {
9198 	enum ring_buffer_flags rb_flags;
9199 
9200 	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9201 
9202 	buf->tr = tr;
9203 
9204 	if (tr->range_addr_start && tr->range_addr_size) {
9205 		buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
9206 						      tr->range_addr_start,
9207 						      tr->range_addr_size);
9208 
9209 		ring_buffer_last_boot_delta(buf->buffer,
9210 					    &tr->text_delta, &tr->data_delta);
9211 		/*
9212 		 * This is basically the same as a mapped buffer,
9213 		 * with the same restrictions.
9214 		 */
9215 		tr->mapped++;
9216 	} else {
9217 		buf->buffer = ring_buffer_alloc(size, rb_flags);
9218 	}
9219 	if (!buf->buffer)
9220 		return -ENOMEM;
9221 
9222 	buf->data = alloc_percpu(struct trace_array_cpu);
9223 	if (!buf->data) {
9224 		ring_buffer_free(buf->buffer);
9225 		buf->buffer = NULL;
9226 		return -ENOMEM;
9227 	}
9228 
9229 	/* Allocate the first page for all buffers */
9230 	set_buffer_entries(&tr->array_buffer,
9231 			   ring_buffer_size(tr->array_buffer.buffer, 0));
9232 
9233 	return 0;
9234 }
9235 
9236 static void free_trace_buffer(struct array_buffer *buf)
9237 {
9238 	if (buf->buffer) {
9239 		ring_buffer_free(buf->buffer);
9240 		buf->buffer = NULL;
9241 		free_percpu(buf->data);
9242 		buf->data = NULL;
9243 	}
9244 }
9245 
9246 static int allocate_trace_buffers(struct trace_array *tr, int size)
9247 {
9248 	int ret;
9249 
9250 	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9251 	if (ret)
9252 		return ret;
9253 
9254 #ifdef CONFIG_TRACER_MAX_TRACE
9255 	/* Fixed (memory mapped) buffer trace arrays do not have snapshot buffers */
9256 	if (tr->range_addr_start)
9257 		return 0;
9258 
9259 	ret = allocate_trace_buffer(tr, &tr->max_buffer,
9260 				    allocate_snapshot ? size : 1);
9261 	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9262 		free_trace_buffer(&tr->array_buffer);
9263 		return -ENOMEM;
9264 	}
9265 	tr->allocated_snapshot = allocate_snapshot;
9266 
9267 	allocate_snapshot = false;
9268 #endif
9269 
9270 	return 0;
9271 }
9272 
9273 static void free_trace_buffers(struct trace_array *tr)
9274 {
9275 	if (!tr)
9276 		return;
9277 
9278 	free_trace_buffer(&tr->array_buffer);
9279 
9280 #ifdef CONFIG_TRACER_MAX_TRACE
9281 	free_trace_buffer(&tr->max_buffer);
9282 #endif
9283 }
9284 
9285 static void init_trace_flags_index(struct trace_array *tr)
9286 {
9287 	int i;
9288 
9289 	/* Used by the trace options files */
9290 	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9291 		tr->trace_flags_index[i] = i;
9292 }
9293 
9294 static void __update_tracer_options(struct trace_array *tr)
9295 {
9296 	struct tracer *t;
9297 
9298 	for (t = trace_types; t; t = t->next)
9299 		add_tracer_options(tr, t);
9300 }
9301 
9302 static void update_tracer_options(struct trace_array *tr)
9303 {
9304 	mutex_lock(&trace_types_lock);
9305 	tracer_options_updated = true;
9306 	__update_tracer_options(tr);
9307 	mutex_unlock(&trace_types_lock);
9308 }
9309 
9310 /* Must have trace_types_lock held */
9311 struct trace_array *trace_array_find(const char *instance)
9312 {
9313 	struct trace_array *tr, *found = NULL;
9314 
9315 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9316 		if (tr->name && strcmp(tr->name, instance) == 0) {
9317 			found = tr;
9318 			break;
9319 		}
9320 	}
9321 
9322 	return found;
9323 }
9324 
9325 struct trace_array *trace_array_find_get(const char *instance)
9326 {
9327 	struct trace_array *tr;
9328 
9329 	mutex_lock(&trace_types_lock);
9330 	tr = trace_array_find(instance);
9331 	if (tr)
9332 		tr->ref++;
9333 	mutex_unlock(&trace_types_lock);
9334 
9335 	return tr;
9336 }
9337 
9338 static int trace_array_create_dir(struct trace_array *tr)
9339 {
9340 	int ret;
9341 
9342 	tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9343 	if (!tr->dir)
9344 		return -EINVAL;
9345 
9346 	ret = event_trace_add_tracer(tr->dir, tr);
9347 	if (ret) {
9348 		tracefs_remove(tr->dir);
9349 		return ret;
9350 	}
9351 
9352 	init_tracer_tracefs(tr, tr->dir);
9353 	__update_tracer_options(tr);
9354 
9355 	return ret;
9356 }
9357 
9358 static struct trace_array *
9359 trace_array_create_systems(const char *name, const char *systems,
9360 			   unsigned long range_addr_start,
9361 			   unsigned long range_addr_size)
9362 {
9363 	struct trace_array *tr;
9364 	int ret;
9365 
9366 	ret = -ENOMEM;
9367 	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9368 	if (!tr)
9369 		return ERR_PTR(ret);
9370 
9371 	tr->name = kstrdup(name, GFP_KERNEL);
9372 	if (!tr->name)
9373 		goto out_free_tr;
9374 
9375 	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9376 		goto out_free_tr;
9377 
9378 	if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9379 		goto out_free_tr;
9380 
9381 	if (systems) {
9382 		tr->system_names = kstrdup_const(systems, GFP_KERNEL);
9383 		if (!tr->system_names)
9384 			goto out_free_tr;
9385 	}
9386 
9387 	/* Only for boot up memory mapped ring buffers */
9388 	tr->range_addr_start = range_addr_start;
9389 	tr->range_addr_size = range_addr_size;
9390 
9391 	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9392 
9393 	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9394 
9395 	raw_spin_lock_init(&tr->start_lock);
9396 
9397 	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9398 #ifdef CONFIG_TRACER_MAX_TRACE
9399 	spin_lock_init(&tr->snapshot_trigger_lock);
9400 #endif
9401 	tr->current_trace = &nop_trace;
9402 
9403 	INIT_LIST_HEAD(&tr->systems);
9404 	INIT_LIST_HEAD(&tr->events);
9405 	INIT_LIST_HEAD(&tr->hist_vars);
9406 	INIT_LIST_HEAD(&tr->err_log);
9407 
9408 	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9409 		goto out_free_tr;
9410 
9411 	/* The ring buffer is expanded by default */
9412 	trace_set_ring_buffer_expanded(tr);
9413 
9414 	if (ftrace_allocate_ftrace_ops(tr) < 0)
9415 		goto out_free_tr;
9416 
9417 	ftrace_init_trace_array(tr);
9418 
9419 	init_trace_flags_index(tr);
9420 
9421 	if (trace_instance_dir) {
9422 		ret = trace_array_create_dir(tr);
9423 		if (ret)
9424 			goto out_free_tr;
9425 	} else
9426 		__trace_early_add_events(tr);
9427 
9428 	list_add(&tr->list, &ftrace_trace_arrays);
9429 
9430 	tr->ref++;
9431 
9432 	return tr;
9433 
9434  out_free_tr:
9435 	ftrace_free_ftrace_ops(tr);
9436 	free_trace_buffers(tr);
9437 	free_cpumask_var(tr->pipe_cpumask);
9438 	free_cpumask_var(tr->tracing_cpumask);
9439 	kfree_const(tr->system_names);
9440 	kfree(tr->name);
9441 	kfree(tr);
9442 
9443 	return ERR_PTR(ret);
9444 }
9445 
9446 static struct trace_array *trace_array_create(const char *name)
9447 {
9448 	return trace_array_create_systems(name, NULL, 0, 0);
9449 }
9450 
9451 static int instance_mkdir(const char *name)
9452 {
9453 	struct trace_array *tr;
9454 	int ret;
9455 
9456 	guard(mutex)(&event_mutex);
9457 	guard(mutex)(&trace_types_lock);
9458 
9460 	if (trace_array_find(name))
9461 		return -EEXIST;
9462 
9463 	tr = trace_array_create(name);
9464 
9465 	ret = PTR_ERR_OR_ZERO(tr);
9466 
9467 	return ret;
9468 }
9469 
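/*
 * Map a physically contiguous region (such as one set aside for a
 * boot-time ring buffer) into the kernel's virtual address space.
 * Builds a page array covering the region and vmap()s it, returning
 * the virtual address, or 0 if the page array cannot be allocated.
 */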
9470 static u64 map_pages(u64 start, u64 size)
9471 {
9472 	struct page **pages;
9473 	phys_addr_t page_start;
9474 	unsigned int page_count;
9475 	unsigned int i;
9476 	void *vaddr;
9477 
9478 	page_count = DIV_ROUND_UP(size, PAGE_SIZE);
9479 
9480 	page_start = start;
9481 	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
9482 	if (!pages)
9483 		return 0;
9484 
9485 	for (i = 0; i < page_count; i++) {
9486 		phys_addr_t addr = page_start + i * PAGE_SIZE;
9487 		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
9488 	}
9489 	vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
9490 	kfree(pages);
9491 
9492 	return (u64)(unsigned long)vaddr;
9493 }
9494 
9495 /**
9496  * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9497  * @name: The name of the trace array to be looked up/created.
9498  * @systems: A list of systems to create event directories for (NULL for all)
9499  *
9500  * Returns a pointer to the trace array with the given name, or
9501  * NULL if it cannot be created.
9502  *
9503  * NOTE: This function increments the reference counter associated with the
9504  * trace array returned. This makes sure it cannot be freed while in use.
9505  * Use trace_array_put() once the trace array is no longer needed.
9506  * If the trace_array is to be freed, trace_array_destroy() needs to
9507  * be called after the trace_array_put(), or simply let user space delete
9508  * it from the tracefs instances directory. But until the
9509  * trace_array_put() is called, user space cannot delete it.
9510  *
9511  */
9512 struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
9513 {
9514 	struct trace_array *tr;
9515 
9516 	guard(mutex)(&event_mutex);
9517 	guard(mutex)(&trace_types_lock);
9518 
9519 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9520 		if (tr->name && strcmp(tr->name, name) == 0) {
9521 			tr->ref++;
9522 			return tr;
9523 		}
9524 	}
9525 
9526 	tr = trace_array_create_systems(name, systems, 0, 0);
9527 
9528 	if (IS_ERR(tr))
9529 		tr = NULL;
9530 	else
9531 		tr->ref++;
9532 
9533 	return tr;
9534 }
9535 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
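
/*
 * Usage sketch for an in-kernel user (illustrative; the instance name
 * is made up here -- see samples/ftrace/sample-trace-array.c for a
 * complete example):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my-instance", NULL);
 *	if (!tr)
 *		return -ENOMEM;
 *	trace_array_init_printk(tr);
 *	trace_array_printk(tr, _THIS_IP_, "writing to my-instance\n");
 *	trace_array_put(tr);
 */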
9536 
9537 static int __remove_instance(struct trace_array *tr)
9538 {
9539 	int i;
9540 
9541 	/* Reference counter for a newly created trace array = 1. */
9542 	if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9543 		return -EBUSY;
9544 
9545 	list_del(&tr->list);
9546 
9547 	/* Disable all the flags that were enabled coming in */
9548 	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9549 		if ((1 << i) & ZEROED_TRACE_FLAGS)
9550 			set_tracer_flag(tr, 1 << i, 0);
9551 	}
9552 
9553 	if (printk_trace == tr)
9554 		update_printk_trace(&global_trace);
9555 
9556 	tracing_set_nop(tr);
9557 	clear_ftrace_function_probes(tr);
9558 	event_trace_del_tracer(tr);
9559 	ftrace_clear_pids(tr);
9560 	ftrace_destroy_function_files(tr);
9561 	tracefs_remove(tr->dir);
9562 	free_percpu(tr->last_func_repeats);
9563 	free_trace_buffers(tr);
9564 	clear_tracing_err_log(tr);
9565 
9566 	for (i = 0; i < tr->nr_topts; i++) {
9567 		kfree(tr->topts[i].topts);
9568 	}
9569 	kfree(tr->topts);
9570 
9571 	free_cpumask_var(tr->pipe_cpumask);
9572 	free_cpumask_var(tr->tracing_cpumask);
9573 	kfree_const(tr->system_names);
9574 	kfree(tr->name);
9575 	kfree(tr);
9576 
9577 	return 0;
9578 }
9579 
9580 int trace_array_destroy(struct trace_array *this_tr)
9581 {
9582 	struct trace_array *tr;
9583 
9584 	if (!this_tr)
9585 		return -EINVAL;
9586 
9587 	guard(mutex)(&event_mutex);
9588 	guard(mutex)(&trace_types_lock);
9589 
9591 	/* Make sure the trace array exists before destroying it. */
9592 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9593 		if (tr == this_tr)
9594 			return __remove_instance(tr);
9595 	}
9596 
9597 	return -ENODEV;
9598 }
9599 EXPORT_SYMBOL_GPL(trace_array_destroy);
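
/*
 * Note the teardown ordering described at trace_array_get_by_name():
 * kernel users drop their reference first, then destroy, e.g.:
 *
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);
 *
 * __remove_instance() returns -EBUSY while extra references are held.
 */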
9600 
9601 static int instance_rmdir(const char *name)
9602 {
9603 	struct trace_array *tr;
9604 
9605 	guard(mutex)(&event_mutex);
9606 	guard(mutex)(&trace_types_lock);
9607 
9608 	tr = trace_array_find(name);
9609 	if (!tr)
9610 		return -ENODEV;
9611 
9612 	return __remove_instance(tr);
9613 }
9614 
9615 static __init void create_trace_instances(struct dentry *d_tracer)
9616 {
9617 	struct trace_array *tr;
9618 
9619 	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9620 							 instance_mkdir,
9621 							 instance_rmdir);
9622 	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9623 		return;
9624 
9625 	guard(mutex)(&event_mutex);
9626 	guard(mutex)(&trace_types_lock);
9627 
9628 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9629 		if (!tr->name)
9630 			continue;
9631 		if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9632 			     "Failed to create instance directory\n"))
9633 			return;
9634 	}
9635 }
9636 
9637 static void
9638 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9639 {
9640 	int cpu;
9641 
9642 	trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9643 			tr, &show_traces_fops);
9644 
9645 	trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9646 			tr, &set_tracer_fops);
9647 
9648 	trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9649 			  tr, &tracing_cpumask_fops);
9650 
9651 	trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9652 			  tr, &tracing_iter_fops);
9653 
9654 	trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9655 			  tr, &tracing_fops);
9656 
9657 	trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9658 			  tr, &tracing_pipe_fops);
9659 
9660 	trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9661 			  tr, &tracing_entries_fops);
9662 
9663 	trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9664 			  tr, &tracing_total_entries_fops);
9665 
9666 	trace_create_file("free_buffer", 0200, d_tracer,
9667 			  tr, &tracing_free_buffer_fops);
9668 
9669 	trace_create_file("trace_marker", 0220, d_tracer,
9670 			  tr, &tracing_mark_fops);
9671 
9672 	tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
9673 
9674 	trace_create_file("trace_marker_raw", 0220, d_tracer,
9675 			  tr, &tracing_mark_raw_fops);
9676 
9677 	trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9678 			  &trace_clock_fops);
9679 
9680 	trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9681 			  tr, &rb_simple_fops);
9682 
9683 	trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9684 			  &trace_time_stamp_mode_fops);
9685 
9686 	tr->buffer_percent = 50;
9687 
9688 	trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
9689 			tr, &buffer_percent_fops);
9690 
9691 	trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer,
9692 			  tr, &buffer_subbuf_size_fops);
9693 
9694 	create_trace_options_dir(tr);
9695 
9696 #ifdef CONFIG_TRACER_MAX_TRACE
9697 	trace_create_maxlat_file(tr, d_tracer);
9698 #endif
9699 
9700 	if (ftrace_create_function_files(tr, d_tracer))
9701 		MEM_FAIL(1, "Could not allocate function filter files");
9702 
9703 	if (tr->range_addr_start) {
9704 		trace_create_file("last_boot_info", TRACE_MODE_READ, d_tracer,
9705 				  tr, &last_boot_fops);
9706 #ifdef CONFIG_TRACER_SNAPSHOT
9707 	} else {
9708 		trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9709 				  tr, &snapshot_fops);
9710 #endif
9711 	}
9712 
9713 	trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9714 			  tr, &tracing_err_log_fops);
9715 
9716 	for_each_tracing_cpu(cpu)
9717 		tracing_init_tracefs_percpu(tr, cpu);
9718 
9719 	ftrace_init_tracefs(tr, d_tracer);
9720 }
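
/*
 * The files created above exist at the top level of tracefs for the
 * global trace array and under instances/<name>/ for each instance,
 * e.g. (with a hypothetical instance called "foo"):
 *
 *	/sys/kernel/tracing/instances/foo/trace
 *	/sys/kernel/tracing/instances/foo/trace_pipe
 *	/sys/kernel/tracing/instances/foo/tracing_on
 */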
9721 
9722 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9723 {
9724 	struct vfsmount *mnt;
9725 	struct file_system_type *type;
9726 
9727 	/*
9728 	 * To maintain backward compatibility for tools that mount
9729 	 * debugfs to get to the tracing facility, tracefs is automatically
9730 	 * mounted to the debugfs/tracing directory.
9731 	 */
9732 	type = get_fs_type("tracefs");
9733 	if (!type)
9734 		return NULL;
9735 	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9736 	put_filesystem(type);
9737 	if (IS_ERR(mnt))
9738 		return NULL;
9739 	mntget(mnt);
9740 
9741 	return mnt;
9742 }
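
/*
 * With this automount in place, pre-tracefs tooling keeps working,
 * e.g.:
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# ls /sys/kernel/debug/tracing    # automounts tracefs here
 */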
9743 
9744 /**
9745  * tracing_init_dentry - initialize top level trace array
9746  *
9747  * This is called when creating files or directories in the tracing
9748  * directory. It is called via fs_initcall() by any of the boot up code
9749  * and returns zero on success, or a negative error code otherwise.
9750  */
9751 int tracing_init_dentry(void)
9752 {
9753 	struct trace_array *tr = &global_trace;
9754 
9755 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
9756 		pr_warn("Tracing disabled due to lockdown\n");
9757 		return -EPERM;
9758 	}
9759 
9760 	/* The top level trace array uses NULL as parent */
9761 	if (tr->dir)
9762 		return 0;
9763 
9764 	if (WARN_ON(!tracefs_initialized()))
9765 		return -ENODEV;
9766 
9767 	/*
9768 	 * As there may still be users that expect the tracing
9769 	 * files to exist in debugfs/tracing, we must automount
9770 	 * the tracefs file system there, so older tools still
9771 	 * work with the newer kernel.
9772 	 */
9773 	tr->dir = debugfs_create_automount("tracing", NULL,
9774 					   trace_automount, NULL);
9775 
9776 	return 0;
9777 }
9778 
9779 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9780 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9781 
9782 static struct workqueue_struct *eval_map_wq __initdata;
9783 static struct work_struct eval_map_work __initdata;
9784 static struct work_struct tracerfs_init_work __initdata;
9785 
9786 static void __init eval_map_work_func(struct work_struct *work)
9787 {
9788 	int len;
9789 
9790 	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9791 	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9792 }
9793 
9794 static int __init trace_eval_init(void)
9795 {
9796 	INIT_WORK(&eval_map_work, eval_map_work_func);
9797 
9798 	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9799 	if (!eval_map_wq) {
9800 		pr_err("Unable to allocate eval_map_wq\n");
9801 		/* Do work here */
9802 		eval_map_work_func(&eval_map_work);
9803 		return -ENOMEM;
9804 	}
9805 
9806 	queue_work(eval_map_wq, &eval_map_work);
9807 	return 0;
9808 }
9809 
9810 subsys_initcall(trace_eval_init);
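
/*
 * The eval maps inserted above are generated by TRACE_DEFINE_ENUM()
 * (and TRACE_DEFINE_SIZEOF()) invocations in trace event headers, e.g.:
 *
 *	TRACE_DEFINE_ENUM(XDP_DROP);
 *
 * so that event print formats can show the symbolic name rather than
 * a raw number.
 */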
9811 
9812 static int __init trace_eval_sync(void)
9813 {
9814 	/* Make sure the eval map updates are finished */
9815 	if (eval_map_wq)
9816 		destroy_workqueue(eval_map_wq);
9817 	return 0;
9818 }
9819 
9820 late_initcall_sync(trace_eval_sync);
9821 
9822 
9823 #ifdef CONFIG_MODULES
9824 static void trace_module_add_evals(struct module *mod)
9825 {
9826 	if (!mod->num_trace_evals)
9827 		return;
9828 
9829 	/*
9830 	 * Modules with a bad taint do not have events created;
9831 	 * do not bother with their enums either.
9832 	 */
9833 	if (trace_module_has_bad_taint(mod))
9834 		return;
9835 
9836 	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9837 }
9838 
9839 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9840 static void trace_module_remove_evals(struct module *mod)
9841 {
9842 	union trace_eval_map_item *map;
9843 	union trace_eval_map_item **last = &trace_eval_maps;
9844 
9845 	if (!mod->num_trace_evals)
9846 		return;
9847 
9848 	guard(mutex)(&trace_eval_mutex);
9849 
9850 	map = trace_eval_maps;
9851 
9852 	while (map) {
9853 		if (map->head.mod == mod)
9854 			break;
9855 		map = trace_eval_jmp_to_tail(map);
9856 		last = &map->tail.next;
9857 		map = map->tail.next;
9858 	}
9859 	if (!map)
9860 		return;
9861 
9862 	*last = trace_eval_jmp_to_tail(map)->tail.next;
9863 	kfree(map);
9864 }
9865 #else
9866 static inline void trace_module_remove_evals(struct module *mod) { }
9867 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9868 
9869 static int trace_module_notify(struct notifier_block *self,
9870 			       unsigned long val, void *data)
9871 {
9872 	struct module *mod = data;
9873 
9874 	switch (val) {
9875 	case MODULE_STATE_COMING:
9876 		trace_module_add_evals(mod);
9877 		break;
9878 	case MODULE_STATE_GOING:
9879 		trace_module_remove_evals(mod);
9880 		break;
9881 	}
9882 
9883 	return NOTIFY_OK;
9884 }
9885 
9886 static struct notifier_block trace_module_nb = {
9887 	.notifier_call = trace_module_notify,
9888 	.priority = 0,
9889 };
9890 #endif /* CONFIG_MODULES */
9891 
9892 static __init void tracer_init_tracefs_work_func(struct work_struct *work)
9893 {
9895 	event_trace_init();
9896 
9897 	init_tracer_tracefs(&global_trace, NULL);
9898 	ftrace_init_tracefs_toplevel(&global_trace, NULL);
9899 
9900 	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
9901 			&global_trace, &tracing_thresh_fops);
9902 
9903 	trace_create_file("README", TRACE_MODE_READ, NULL,
9904 			NULL, &tracing_readme_fops);
9905 
9906 	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
9907 			NULL, &tracing_saved_cmdlines_fops);
9908 
9909 	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
9910 			  NULL, &tracing_saved_cmdlines_size_fops);
9911 
9912 	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
9913 			NULL, &tracing_saved_tgids_fops);
9914 
9915 	trace_create_eval_file(NULL);
9916 
9917 #ifdef CONFIG_MODULES
9918 	register_module_notifier(&trace_module_nb);
9919 #endif
9920 
9921 #ifdef CONFIG_DYNAMIC_FTRACE
9922 	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
9923 			NULL, &tracing_dyn_info_fops);
9924 #endif
9925 
9926 	create_trace_instances(NULL);
9927 
9928 	update_tracer_options(&global_trace);
9929 }
9930 
9931 static __init int tracer_init_tracefs(void)
9932 {
9933 	int ret;
9934 
9935 	trace_access_lock_init();
9936 
9937 	ret = tracing_init_dentry();
9938 	if (ret)
9939 		return 0;
9940 
9941 	if (eval_map_wq) {
9942 		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
9943 		queue_work(eval_map_wq, &tracerfs_init_work);
9944 	} else {
9945 		tracer_init_tracefs_work_func(NULL);
9946 	}
9947 
9948 	rv_init_interface();
9949 
9950 	return 0;
9951 }
9952 
9953 fs_initcall(tracer_init_tracefs);
9954 
9955 static int trace_die_panic_handler(struct notifier_block *self,
9956 				unsigned long ev, void *unused);
9957 
9958 static struct notifier_block trace_panic_notifier = {
9959 	.notifier_call = trace_die_panic_handler,
9960 	.priority = INT_MAX - 1,
9961 };
9962 
9963 static struct notifier_block trace_die_notifier = {
9964 	.notifier_call = trace_die_panic_handler,
9965 	.priority = INT_MAX - 1,
9966 };
9967 
9968 /*
9969  * The idea is to execute the following die/panic callback early, in order
9970  * to avoid showing irrelevant information in the trace (like other panic
9971  * notifier functions); we are the second to run, after hung_task/rcu_stall
9972  * warnings get disabled (to prevent potential log flooding).
9973  */
9974 static int trace_die_panic_handler(struct notifier_block *self,
9975 				unsigned long ev, void *unused)
9976 {
9977 	if (!ftrace_dump_on_oops_enabled())
9978 		return NOTIFY_DONE;
9979 
9980 	/* The die notifier requires DIE_OOPS to trigger */
9981 	if (self == &trace_die_notifier && ev != DIE_OOPS)
9982 		return NOTIFY_DONE;
9983 
9984 	ftrace_dump(DUMP_PARAM);
9985 
9986 	return NOTIFY_DONE;
9987 }
9988 
9989 /*
9990  * printk is capped at 1024 bytes; we really don't need it that big.
9991  * Nothing should be printing 1000 characters anyway.
9992  */
9993 #define TRACE_MAX_PRINT		1000
9994 
9995 /*
9996  * Define here KERN_TRACE so that we have one place to modify
9997  * it if we decide to change what log level the ftrace dump
9998  * should be at.
9999  */
10000 #define KERN_TRACE		KERN_EMERG
10001 
10002 void
10003 trace_printk_seq(struct trace_seq *s)
10004 {
10005 	/* Probably should print a warning here. */
10006 	if (s->seq.len >= TRACE_MAX_PRINT)
10007 		s->seq.len = TRACE_MAX_PRINT;
10008 
10009 	/*
10010 	 * More paranoid code. Although the buffer size is set to
10011 	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
10012 	 * an extra layer of protection.
10013 	 */
10014 	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
10015 		s->seq.len = s->seq.size - 1;
10016 
10017 	/* Should be NUL-terminated already, but we are paranoid. */
10018 	s->buffer[s->seq.len] = 0;
10019 
10020 	printk(KERN_TRACE "%s", s->buffer);
10021 
10022 	trace_seq_init(s);
10023 }
10024 
10025 static void trace_init_iter(struct trace_iterator *iter, struct trace_array *tr)
10026 {
10027 	iter->tr = tr;
10028 	iter->trace = iter->tr->current_trace;
10029 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
10030 	iter->array_buffer = &tr->array_buffer;
10031 
10032 	if (iter->trace && iter->trace->open)
10033 		iter->trace->open(iter);
10034 
10035 	/* Annotate start of buffers if we had overruns */
10036 	if (ring_buffer_overruns(iter->array_buffer->buffer))
10037 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
10038 
10039 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
10040 	if (trace_clocks[iter->tr->clock_id].in_ns)
10041 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
10042 
10043 	/* Cannot use kmalloc for iter.temp and iter.fmt */
10044 	iter->temp = static_temp_buf;
10045 	iter->temp_size = STATIC_TEMP_BUF_SIZE;
10046 	iter->fmt = static_fmt_buf;
10047 	iter->fmt_size = STATIC_FMT_BUF_SIZE;
10048 }
10049 
10050 void trace_init_global_iter(struct trace_iterator *iter)
10051 {
10052 	trace_init_iter(iter, &global_trace);
10053 }
10054 
10055 static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_mode)
10056 {
10057 	/* use static because iter can be a bit big for the stack */
10058 	static struct trace_iterator iter;
10059 	unsigned int old_userobj;
10060 	unsigned long flags;
10061 	int cnt = 0, cpu;
10062 
10063 	/*
10064 	 * Always turn off tracing when we dump.
10065 	 * We don't need to show trace output of what happens
10066 	 * between multiple crashes.
10067 	 *
10068 	 * If the user does a sysrq-z, then they can re-enable
10069 	 * tracing with echo 1 > tracing_on.
10070 	 */
10071 	tracer_tracing_off(tr);
10072 
10073 	local_irq_save(flags);
10074 
10075 	/* Simulate the iterator */
10076 	trace_init_iter(&iter, tr);
10077 
10078 	for_each_tracing_cpu(cpu) {
10079 		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10080 	}
10081 
10082 	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
10083 
10084 	/* don't look at user memory in panic mode */
10085 	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
10086 
10087 	if (dump_mode == DUMP_ORIG)
10088 		iter.cpu_file = raw_smp_processor_id();
10089 	else
10090 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
10091 
10092 	if (tr == &global_trace)
10093 		printk(KERN_TRACE "Dumping ftrace buffer:\n");
10094 	else
10095 		printk(KERN_TRACE "Dumping ftrace instance %s buffer:\n", tr->name);
10096 
10097 	/* Did function tracer already get disabled? */
10098 	if (ftrace_is_dead()) {
10099 		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
10100 		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
10101 	}
10102 
10103 	/*
10104 	 * We need to stop all tracing on all CPUs to read
10105 	 * the next buffer. This is a bit expensive, but is
10106 	 * not done often. We read everything we can,
10107 	 * and then release the locks again.
10108 	 */
10109 
10110 	while (!trace_empty(&iter)) {
10111 
10112 		if (!cnt)
10113 			printk(KERN_TRACE "---------------------------------\n");
10114 
10115 		cnt++;
10116 
10117 		trace_iterator_reset(&iter);
10118 		iter.iter_flags |= TRACE_FILE_LAT_FMT;
10119 
10120 		if (trace_find_next_entry_inc(&iter) != NULL) {
10121 			int ret;
10122 
10123 			ret = print_trace_line(&iter);
10124 			if (ret != TRACE_TYPE_NO_CONSUME)
10125 				trace_consume(&iter);
10126 		}
10127 		touch_nmi_watchdog();
10128 
10129 		trace_printk_seq(&iter.seq);
10130 	}
10131 
10132 	if (!cnt)
10133 		printk(KERN_TRACE "   (ftrace buffer empty)\n");
10134 	else
10135 		printk(KERN_TRACE "---------------------------------\n");
10136 
10137 	tr->trace_flags |= old_userobj;
10138 
10139 	for_each_tracing_cpu(cpu) {
10140 		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10141 	}
10142 	local_irq_restore(flags);
10143 }
10144 
10145 static void ftrace_dump_by_param(void)
10146 {
10147 	bool first_param = true;
10148 	char dump_param[MAX_TRACER_SIZE];
10149 	char *buf, *token, *inst_name;
10150 	struct trace_array *tr;
10151 
10152 	strscpy(dump_param, ftrace_dump_on_oops, MAX_TRACER_SIZE);
10153 	buf = dump_param;
10154 
10155 	while ((token = strsep(&buf, ",")) != NULL) {
10156 		if (first_param) {
10157 			first_param = false;
10158 			if (!strcmp("0", token))
10159 				continue;
10160 			else if (!strcmp("1", token)) {
10161 				ftrace_dump_one(&global_trace, DUMP_ALL);
10162 				continue;
10163 			}
10164 			else if (!strcmp("2", token) ||
10165 			  !strcmp("orig_cpu", token)) {
10166 				ftrace_dump_one(&global_trace, DUMP_ORIG);
10167 				continue;
10168 			}
10169 		}
10170 
10171 		inst_name = strsep(&token, "=");
10172 		tr = trace_array_find(inst_name);
10173 		if (!tr) {
10174 			printk(KERN_TRACE "Instance %s not found\n", inst_name);
10175 			continue;
10176 		}
10177 
10178 		if (token && (!strcmp("2", token) ||
10179 			  !strcmp("orig_cpu", token)))
10180 			ftrace_dump_one(tr, DUMP_ORIG);
10181 		else
10182 			ftrace_dump_one(tr, DUMP_ALL);
10183 	}
10184 }
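
/*
 * Examples of the ftrace_dump_on_oops format parsed above
 * (illustrative; "foo" names a hypothetical trace instance):
 *
 *	ftrace_dump_on_oops=1			dump the global buffer, all CPUs
 *	ftrace_dump_on_oops=orig_cpu		dump only the oopsing CPU
 *	ftrace_dump_on_oops=1,foo=orig_cpu	global buffer plus instance foo
 */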
10185 
10186 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
10187 {
10188 	static atomic_t dump_running;
10189 
10190 	/* Only allow one dump user at a time. */
10191 	if (atomic_inc_return(&dump_running) != 1) {
10192 		atomic_dec(&dump_running);
10193 		return;
10194 	}
10195 
10196 	switch (oops_dump_mode) {
10197 	case DUMP_ALL:
10198 		ftrace_dump_one(&global_trace, DUMP_ALL);
10199 		break;
10200 	case DUMP_ORIG:
10201 		ftrace_dump_one(&global_trace, DUMP_ORIG);
10202 		break;
10203 	case DUMP_PARAM:
10204 		ftrace_dump_by_param();
10205 		break;
10206 	case DUMP_NONE:
10207 		break;
10208 	default:
10209 		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
10210 		ftrace_dump_one(&global_trace, DUMP_ALL);
10211 	}
10212 
10213 	atomic_dec(&dump_running);
10214 }
10215 EXPORT_SYMBOL_GPL(ftrace_dump);
10216 
10217 #define WRITE_BUFSIZE  4096
10218 
10219 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
10220 				size_t count, loff_t *ppos,
10221 				int (*createfn)(const char *))
10222 {
10223 	char *kbuf, *buf, *tmp;
10224 	int ret = 0;
10225 	size_t done = 0;
10226 	size_t size;
10227 
10228 	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
10229 	if (!kbuf)
10230 		return -ENOMEM;
10231 
10232 	while (done < count) {
10233 		size = count - done;
10234 
10235 		if (size >= WRITE_BUFSIZE)
10236 			size = WRITE_BUFSIZE - 1;
10237 
10238 		if (copy_from_user(kbuf, buffer + done, size)) {
10239 			ret = -EFAULT;
10240 			goto out;
10241 		}
10242 		kbuf[size] = '\0';
10243 		buf = kbuf;
10244 		do {
10245 			tmp = strchr(buf, '\n');
10246 			if (tmp) {
10247 				*tmp = '\0';
10248 				size = tmp - buf + 1;
10249 			} else {
10250 				size = strlen(buf);
10251 				if (done + size < count) {
10252 					if (buf != kbuf)
10253 						break;
10254 					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
10255 					pr_warn("Line length is too long: Should be less than %d\n",
10256 						WRITE_BUFSIZE - 2);
10257 					ret = -EINVAL;
10258 					goto out;
10259 				}
10260 			}
10261 			done += size;
10262 
10263 			/* Remove comments */
10264 			tmp = strchr(buf, '#');
10265 
10266 			if (tmp)
10267 				*tmp = '\0';
10268 
10269 			ret = createfn(buf);
10270 			if (ret)
10271 				goto out;
10272 			buf += size;
10273 
10274 		} while (done < count);
10275 	}
10276 	ret = done;
10277 
10278 out:
10279 	kfree(kbuf);
10280 
10281 	return ret;
10282 }
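
/*
 * trace_parse_run_command() backs the write path of the dynamic event
 * files (e.g. kprobe_events): each '\n'-terminated line is stripped of
 * '#' comments and handed to @createfn, so commands like the following
 * (illustrative probe name) are parsed here:
 *
 *	# echo 'p:myprobe do_sys_openat2 $arg2' >> /sys/kernel/tracing/kprobe_events
 */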
10283 
10284 #ifdef CONFIG_TRACER_MAX_TRACE
10285 __init static bool tr_needs_alloc_snapshot(const char *name)
10286 {
10287 	char *test;
10288 	int len = strlen(name);
10289 	bool ret;
10290 
10291 	if (!boot_snapshot_index)
10292 		return false;
10293 
10294 	if (strncmp(name, boot_snapshot_info, len) == 0 &&
10295 	    boot_snapshot_info[len] == '\t')
10296 		return true;
10297 
10298 	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
10299 	if (!test)
10300 		return false;
10301 
10302 	sprintf(test, "\t%s\t", name);
10303 	ret = strstr(boot_snapshot_info, test) != NULL;
10304 	kfree(test);
10305 	return ret;
10306 }
10307 
10308 __init static void do_allocate_snapshot(const char *name)
10309 {
10310 	if (!tr_needs_alloc_snapshot(name))
10311 		return;
10312 
10313 	/*
10314 	 * When allocate_snapshot is set, the next call to
10315 	 * allocate_trace_buffers() (called by trace_array_get_by_name())
10316 	 * will allocate the snapshot buffer. That will also clear
10317 	 * this flag.
10318 	 */
10319 	allocate_snapshot = true;
10320 }
10321 #else
10322 static inline void do_allocate_snapshot(const char *name) { }
10323 #endif
10324 
10325 __init static void enable_instances(void)
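/*
 * Create the instances requested with the trace_instance= boot
 * parameter. The format handled below is roughly:
 *
 *	trace_instance=<name>[^<flag>...][@<start>:<size>][,<event>...]
 *
 * e.g. (illustrative values):
 *
 *	trace_instance=foo^traceoff^traceprintk@0x1000000:2M,sched:sched_switch
 *
 * where the part after '@' may instead name a reserve_mem region.
 */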
10326 {
10327 	struct trace_array *tr;
10328 	char *curr_str;
10329 	char *name;
10330 	char *str;
10331 	char *tok;
10332 
10333 	/* A tab is always appended */
10334 	boot_instance_info[boot_instance_index - 1] = '\0';
10335 	str = boot_instance_info;
10336 
10337 	while ((curr_str = strsep(&str, "\t"))) {
10338 		phys_addr_t start = 0;
10339 		phys_addr_t size = 0;
10340 		unsigned long addr = 0;
10341 		bool traceprintk = false;
10342 		bool traceoff = false;
10343 		char *flag_delim;
10344 		char *addr_delim;
10345 
10346 		tok = strsep(&curr_str, ",");
10347 
10348 		flag_delim = strchr(tok, '^');
10349 		addr_delim = strchr(tok, '@');
10350 
10351 		if (addr_delim)
10352 			*addr_delim++ = '\0';
10353 
10354 		if (flag_delim)
10355 			*flag_delim++ = '\0';
10356 
10357 		name = tok;
10358 
10359 		if (flag_delim) {
10360 			char *flag;
10361 
10362 			while ((flag = strsep(&flag_delim, "^"))) {
10363 				if (strcmp(flag, "traceoff") == 0) {
10364 					traceoff = true;
10365 				} else if ((strcmp(flag, "printk") == 0) ||
10366 					   (strcmp(flag, "traceprintk") == 0) ||
10367 					   (strcmp(flag, "trace_printk") == 0)) {
10368 					traceprintk = true;
10369 				} else {
10370 					pr_info("Tracing: Invalid instance flag '%s' for %s\n",
10371 						flag, name);
10372 				}
10373 			}
10374 		}
10375 
10376 		tok = addr_delim;
10377 		if (tok && isdigit(*tok)) {
10378 			start = memparse(tok, &tok);
10379 			if (!start) {
10380 				pr_warn("Tracing: Invalid boot instance address for %s\n",
10381 					name);
10382 				continue;
10383 			}
10384 			if (*tok != ':') {
10385 				pr_warn("Tracing: No size specified for instance %s\n", name);
10386 				continue;
10387 			}
10388 			tok++;
10389 			size = memparse(tok, &tok);
10390 			if (!size) {
10391 				pr_warn("Tracing: Invalid boot instance size for %s\n",
10392 					name);
10393 				continue;
10394 			}
10395 		} else if (tok) {
10396 			if (!reserve_mem_find_by_name(tok, &start, &size)) {
10397 				start = 0;
10398 				pr_warn("Failed to map boot instance %s to %s\n", name, tok);
10399 				continue;
10400 			}
10401 		}
10402 
10403 		if (start) {
10404 			addr = map_pages(start, size);
10405 			if (addr) {
10406 				pr_info("Tracing: mapped boot instance %s at physical memory %pa of size 0x%lx\n",
10407 					name, &start, (unsigned long)size);
10408 			} else {
10409 				pr_warn("Tracing: Failed to map boot instance %s\n", name);
10410 				continue;
10411 			}
10412 		} else {
10413 			/* Only non-mapped buffers have snapshot buffers */
10414 			if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
10415 				do_allocate_snapshot(name);
10416 		}
10417 
10418 		tr = trace_array_create_systems(name, NULL, addr, size);
10419 		if (IS_ERR(tr)) {
10420 			pr_warn("Tracing: Failed to create instance buffer %s\n", curr_str);
10421 			continue;
10422 		}
10423 
10424 		if (traceoff)
10425 			tracer_tracing_off(tr);
10426 
10427 		if (traceprintk)
10428 			update_printk_trace(tr);
10429 
10430 		/*
10431 		 * If start is set, then this is a mapped buffer, and
10432 		 * cannot be deleted by user space, so keep the reference
10433 		 * to it.
10434 		 */
10435 		if (start) {
10436 			tr->flags |= TRACE_ARRAY_FL_BOOT;
10437 			tr->ref++;
10438 		}
10439 
10440 		while ((tok = strsep(&curr_str, ","))) {
10441 			early_enable_events(tr, tok, true);
10442 		}
10443 	}
10444 }
10445 
10446 __init static int tracer_alloc_buffers(void)
10447 {
10448 	int ring_buf_size;
10449 	int ret = -ENOMEM;
10450 
10452 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
10453 		pr_warn("Tracing disabled due to lockdown\n");
10454 		return -EPERM;
10455 	}
10456 
10457 	/*
10458 	 * Make sure we don't accidentally add more trace options
10459 	 * than we have bits for.
10460 	 */
10461 	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
10462 
10463 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
10464 		goto out;
10465 
10466 	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
10467 		goto out_free_buffer_mask;
10468 
10469 	/* Only allocate trace_printk buffers if a trace_printk exists */
10470 	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
10471 		/* Must be called before global_trace.buffer is allocated */
10472 		trace_printk_init_buffers();
10473 
10474 	/* To save memory, keep the ring buffer size to its minimum */
10475 	if (global_trace.ring_buffer_expanded)
10476 		ring_buf_size = trace_buf_size;
10477 	else
10478 		ring_buf_size = 1;
10479 
10480 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
10481 	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
10482 
10483 	raw_spin_lock_init(&global_trace.start_lock);
10484 
10485 	/*
10486 	 * The prepare callback allocates some memory for the ring buffer. We
10487 	 * don't free the buffer if the CPU goes down. If we were to free
10488 	 * the buffer, then the user would lose any trace that was in the
10489 	 * buffer. The memory will be removed once the "instance" is removed.
10490 	 */
10491 	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
10492 				      "trace/RB:prepare", trace_rb_cpu_prepare,
10493 				      NULL);
10494 	if (ret < 0)
10495 		goto out_free_cpumask;
10496 	/* Used for event triggers */
10497 	ret = -ENOMEM;
10498 	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
10499 	if (!temp_buffer)
10500 		goto out_rm_hp_state;
10501 
10502 	if (trace_create_savedcmd() < 0)
10503 		goto out_free_temp_buffer;
10504 
10505 	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
10506 		goto out_free_savedcmd;
10507 
10508 	/* TODO: make the number of buffers hot pluggable with CPUs */
10509 	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
10510 		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
10511 		goto out_free_pipe_cpumask;
10512 	}
10513 	if (global_trace.buffer_disabled)
10514 		tracing_off();
10515 
10516 	if (trace_boot_clock) {
10517 		ret = tracing_set_clock(&global_trace, trace_boot_clock);
10518 		if (ret < 0)
10519 			pr_warn("Trace clock %s not defined, going back to default\n",
10520 				trace_boot_clock);
10521 	}
10522 
10523 	/*
10524 	 * register_tracer() might reference current_trace, so it
10525 	 * needs to be set before we register anything. This is
10526 	 * just a bootstrap of current_trace anyway.
10527 	 */
10528 	global_trace.current_trace = &nop_trace;
10529 
10530 	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
10531 #ifdef CONFIG_TRACER_MAX_TRACE
10532 	spin_lock_init(&global_trace.snapshot_trigger_lock);
10533 #endif
10534 	ftrace_init_global_array_ops(&global_trace);
10535 
10536 	init_trace_flags_index(&global_trace);
10537 
10538 	register_tracer(&nop_trace);
10539 
10540 	/* Function tracing may start here (via kernel command line) */
10541 	init_function_trace();
10542 
10543 	/* All seems OK, enable tracing */
10544 	tracing_disabled = 0;
10545 
10546 	atomic_notifier_chain_register(&panic_notifier_list,
10547 				       &trace_panic_notifier);
10548 
10549 	register_die_notifier(&trace_die_notifier);
10550 
10551 	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
10552 
10553 	INIT_LIST_HEAD(&global_trace.systems);
10554 	INIT_LIST_HEAD(&global_trace.events);
10555 	INIT_LIST_HEAD(&global_trace.hist_vars);
10556 	INIT_LIST_HEAD(&global_trace.err_log);
10557 	list_add(&global_trace.list, &ftrace_trace_arrays);
10558 
10559 	apply_trace_boot_options();
10560 
10561 	register_snapshot_cmd();
10562 
10563 	return 0;
10564 
10565 out_free_pipe_cpumask:
10566 	free_cpumask_var(global_trace.pipe_cpumask);
10567 out_free_savedcmd:
10568 	trace_free_saved_cmdlines_buffer();
10569 out_free_temp_buffer:
10570 	ring_buffer_free(temp_buffer);
10571 out_rm_hp_state:
10572 	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
10573 out_free_cpumask:
10574 	free_cpumask_var(global_trace.tracing_cpumask);
10575 out_free_buffer_mask:
10576 	free_cpumask_var(tracing_buffer_mask);
10577 out:
10578 	return ret;
10579 }
10580 
10581 void __init ftrace_boot_snapshot(void)
10582 {
10583 #ifdef CONFIG_TRACER_MAX_TRACE
10584 	struct trace_array *tr;
10585 
10586 	if (!snapshot_at_boot)
10587 		return;
10588 
10589 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
10590 		if (!tr->allocated_snapshot)
10591 			continue;
10592 
10593 		tracing_snapshot_instance(tr);
10594 		trace_array_puts(tr, "** Boot snapshot taken **\n");
10595 	}
10596 #endif
10597 }
10598 
10599 void __init early_trace_init(void)
10600 {
10601 	if (tracepoint_printk) {
10602 		tracepoint_print_iter =
10603 			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
10604 		if (MEM_FAIL(!tracepoint_print_iter,
10605 			     "Failed to allocate trace iterator\n"))
10606 			tracepoint_printk = 0;
10607 		else
10608 			static_key_enable(&tracepoint_printk_key.key);
10609 	}
10610 	tracer_alloc_buffers();
10611 
10612 	init_events();
10613 }
10614 
10615 void __init trace_init(void)
10616 {
10617 	trace_event_init();
10618 
10619 	if (boot_instance_index)
10620 		enable_instances();
10621 }
10622 
10623 __init static void clear_boot_tracer(void)
10624 {
10625 	/*
10626 	 * The default bootup tracer name lives in an init section.
10627 	 * This function is called at late init. If we did not
10628 	 * find the boot tracer, then clear it out, to prevent
10629 	 * later registration from accessing the buffer that is
10630 	 * about to be freed.
10631 	 */
10632 	if (!default_bootup_tracer)
10633 		return;
10634 
10635 	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
10636 	       default_bootup_tracer);
10637 	default_bootup_tracer = NULL;
10638 }
10639 
10640 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
10641 __init static void tracing_set_default_clock(void)
10642 {
10643 	/* sched_clock_stable() is determined in late_initcall */
10644 	if (!trace_boot_clock && !sched_clock_stable()) {
10645 		if (security_locked_down(LOCKDOWN_TRACEFS)) {
10646 			pr_warn("Can not set tracing clock due to lockdown\n");
10647 			return;
10648 		}
10649 
10650 		printk(KERN_WARNING
10651 		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
10652 		       "If you want to keep using the local clock, then add:\n"
10653 		       "  \"trace_clock=local\"\n"
10654 		       "on the kernel command line\n");
10655 		tracing_set_clock(&global_trace, "global");
10656 	}
10657 }
10658 #else
10659 static inline void tracing_set_default_clock(void) { }
10660 #endif
10661 
10662 __init static int late_trace_init(void)
10663 {
10664 	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
10665 		static_key_disable(&tracepoint_printk_key.key);
10666 		tracepoint_printk = 0;
10667 	}
10668 
10669 	tracing_set_default_clock();
10670 	clear_boot_tracer();
10671 	return 0;
10672 }
10673 
10674 late_initcall_sync(late_trace_init);
10675