xref: /linux/kernel/trace/trace.c (revision e0bf6c5ca2d3281f231c5f0c9bf145e9513644de)
1 /*
2  * ring buffer based function tracer
3  *
4  * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally taken from the RT patch by:
8  *    Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code from the latency_tracer, that is:
11  *  Copyright (C) 2004-2006 Ingo Molnar
12  *  Copyright (C) 2004 Nadia Yvette Chambers
13  */
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/pagemap.h>
24 #include <linux/hardirq.h>
25 #include <linux/linkage.h>
26 #include <linux/uaccess.h>
27 #include <linux/kprobes.h>
28 #include <linux/ftrace.h>
29 #include <linux/module.h>
30 #include <linux/percpu.h>
31 #include <linux/splice.h>
32 #include <linux/kdebug.h>
33 #include <linux/string.h>
34 #include <linux/rwsem.h>
35 #include <linux/slab.h>
36 #include <linux/ctype.h>
37 #include <linux/init.h>
38 #include <linux/poll.h>
39 #include <linux/nmi.h>
40 #include <linux/fs.h>
41 #include <linux/sched/rt.h>
42 
43 #include "trace.h"
44 #include "trace_output.h"
45 
46 /*
47  * On boot up, the ring buffer is set to the minimum size, so that
48  * we do not waste memory on systems that are not using tracing.
49  */
50 bool ring_buffer_expanded;
51 
52 /*
53  * We need to change this state when a selftest is running.
54  * A selftest will peek into the ring buffer to count the
55  * entries inserted during the selftest, although concurrent
56  * insertions into the ring buffer, such as trace_printk(), could occur
57  * at the same time, giving false positive or negative results.
58  */
59 static bool __read_mostly tracing_selftest_running;
60 
61 /*
62  * If a tracer is running, we do not want to run SELFTEST.
63  */
64 bool __read_mostly tracing_selftest_disabled;
65 
66 /* Pipe tracepoints to printk */
67 struct trace_iterator *tracepoint_print_iter;
68 int tracepoint_printk;
69 
70 /* For tracers that don't implement custom flags */
71 static struct tracer_opt dummy_tracer_opt[] = {
72 	{ }
73 };
74 
75 static struct tracer_flags dummy_tracer_flags = {
76 	.val = 0,
77 	.opts = dummy_tracer_opt
78 };
79 
80 static int
81 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
82 {
83 	return 0;
84 }
85 
86 /*
87  * To prevent the comm cache from being overwritten when no
88  * tracing is active, only save the comm when a trace event
89  * occurs.
90  */
91 static DEFINE_PER_CPU(bool, trace_cmdline_save);
92 
93 /*
94  * Kill all tracing for good (never come back).
95  * It is initialized to 1 but will turn to zero if the initialization
96  * of the tracer is successful. But that is the only place that sets
97  * this back to zero.
98  */
99 static int tracing_disabled = 1;
100 
101 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
102 
103 cpumask_var_t __read_mostly	tracing_buffer_mask;
104 
105 /*
106  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
107  *
108  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
109  * is set, then ftrace_dump is called. This will output the contents
110  * of the ftrace buffers to the console.  This is very useful for
111  * capturing traces that lead to crashes and outputting them to a
112  * serial console.
113  *
114  * It is off by default, but you can enable it either by specifying
115  * "ftrace_dump_on_oops" on the kernel command line, or by setting
116  * /proc/sys/kernel/ftrace_dump_on_oops
117  * Set it to 1 if you want to dump the buffers of all CPUs
118  * Set it to 2 if you want to dump only the buffer of the CPU that triggered the oops
119  */
120 
121 enum ftrace_dump_mode ftrace_dump_on_oops;
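
/*
 * Illustrative sketch only (hypothetical call site, not used here): besides
 * the automatic dump on oops, the buffers can be dumped by hand with
 * ftrace_dump(), using the same DUMP_ALL/DUMP_ORIG modes.
 */
#if 0
static void example_dump_trace(void)
{
	/* Dump only the current CPU's buffer, like ftrace_dump_on_oops=orig_cpu */
	ftrace_dump(DUMP_ORIG);
}
#endif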
122 
123 /* When set, tracing will stop when a WARN*() is hit */
124 int __disable_trace_on_warning;
125 
126 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
127 
128 #define MAX_TRACER_SIZE		100
129 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
130 static char *default_bootup_tracer;
131 
132 static bool allocate_snapshot;
133 
134 static int __init set_cmdline_ftrace(char *str)
135 {
136 	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
137 	default_bootup_tracer = bootup_tracer_buf;
138 	/* We are using ftrace early, expand it */
139 	ring_buffer_expanded = true;
140 	return 1;
141 }
142 __setup("ftrace=", set_cmdline_ftrace);
143 
144 static int __init set_ftrace_dump_on_oops(char *str)
145 {
146 	if (*str++ != '=' || !*str) {
147 		ftrace_dump_on_oops = DUMP_ALL;
148 		return 1;
149 	}
150 
151 	if (!strcmp("orig_cpu", str)) {
152 		ftrace_dump_on_oops = DUMP_ORIG;
153 		return 1;
154 	}
155 
156 	return 0;
157 }
158 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
159 
160 static int __init stop_trace_on_warning(char *str)
161 {
162 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
163 		__disable_trace_on_warning = 1;
164 	return 1;
165 }
166 __setup("traceoff_on_warning", stop_trace_on_warning);
167 
168 static int __init boot_alloc_snapshot(char *str)
169 {
170 	allocate_snapshot = true;
171 	/* We also need the main ring buffer expanded */
172 	ring_buffer_expanded = true;
173 	return 1;
174 }
175 __setup("alloc_snapshot", boot_alloc_snapshot);
176 
177 
178 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
179 static char *trace_boot_options __initdata;
180 
181 static int __init set_trace_boot_options(char *str)
182 {
183 	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
184 	trace_boot_options = trace_boot_options_buf;
185 	return 0;
186 }
187 __setup("trace_options=", set_trace_boot_options);
188 
189 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
190 static char *trace_boot_clock __initdata;
191 
192 static int __init set_trace_boot_clock(char *str)
193 {
194 	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
195 	trace_boot_clock = trace_boot_clock_buf;
196 	return 0;
197 }
198 __setup("trace_clock=", set_trace_boot_clock);
199 
200 static int __init set_tracepoint_printk(char *str)
201 {
202 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
203 		tracepoint_printk = 1;
204 	return 1;
205 }
206 __setup("tp_printk", set_tracepoint_printk);
207 
208 unsigned long long ns2usecs(cycle_t nsec)
209 {
210 	nsec += 500;
211 	do_div(nsec, 1000);
212 	return nsec;
213 }
214 
215 /*
216  * The global_trace is the descriptor that holds the tracing
217  * buffers for the live tracing. For each CPU, it contains
218  * a linked list of pages that will store trace entries. The
219  * page descriptor of the pages in memory is used to hold
220  * the linked list by linking the lru item in the page descriptor
221  * to each of the pages in the buffer per CPU.
222  *
223  * For each active CPU there is a data field that holds the
224  * pages for the buffer for that CPU. Each CPU has the same number
225  * of pages allocated for its buffer.
226  */
227 static struct trace_array	global_trace;
228 
229 LIST_HEAD(ftrace_trace_arrays);
230 
231 int trace_array_get(struct trace_array *this_tr)
232 {
233 	struct trace_array *tr;
234 	int ret = -ENODEV;
235 
236 	mutex_lock(&trace_types_lock);
237 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
238 		if (tr == this_tr) {
239 			tr->ref++;
240 			ret = 0;
241 			break;
242 		}
243 	}
244 	mutex_unlock(&trace_types_lock);
245 
246 	return ret;
247 }
248 
249 static void __trace_array_put(struct trace_array *this_tr)
250 {
251 	WARN_ON(!this_tr->ref);
252 	this_tr->ref--;
253 }
254 
255 void trace_array_put(struct trace_array *this_tr)
256 {
257 	mutex_lock(&trace_types_lock);
258 	__trace_array_put(this_tr);
259 	mutex_unlock(&trace_types_lock);
260 }
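
/*
 * Illustrative sketch (the helper below is hypothetical): callers that need
 * to keep a trace_array alive across an operation pin it with
 * trace_array_get() and drop the reference with trace_array_put().
 */
#if 0
static int example_use_instance(struct trace_array *tr)
{
	if (trace_array_get(tr) < 0)
		return -ENODEV;		/* the instance has been removed */

	/* ... safe to use @tr here, it can not be freed under us ... */

	trace_array_put(tr);
	return 0;
}
#endif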
261 
262 int filter_check_discard(struct ftrace_event_file *file, void *rec,
263 			 struct ring_buffer *buffer,
264 			 struct ring_buffer_event *event)
265 {
266 	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
267 	    !filter_match_preds(file->filter, rec)) {
268 		ring_buffer_discard_commit(buffer, event);
269 		return 1;
270 	}
271 
272 	return 0;
273 }
274 EXPORT_SYMBOL_GPL(filter_check_discard);
275 
276 int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
277 			      struct ring_buffer *buffer,
278 			      struct ring_buffer_event *event)
279 {
280 	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
281 	    !filter_match_preds(call->filter, rec)) {
282 		ring_buffer_discard_commit(buffer, event);
283 		return 1;
284 	}
285 
286 	return 0;
287 }
288 EXPORT_SYMBOL_GPL(call_filter_check_discard);
289 
290 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
291 {
292 	u64 ts;
293 
294 	/* Early boot up does not have a buffer yet */
295 	if (!buf->buffer)
296 		return trace_clock_local();
297 
298 	ts = ring_buffer_time_stamp(buf->buffer, cpu);
299 	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
300 
301 	return ts;
302 }
303 
304 cycle_t ftrace_now(int cpu)
305 {
306 	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
307 }
308 
309 /**
310  * tracing_is_enabled - Show if global_trace has been disabled
311  *
312  * Shows if the global trace has been enabled or not. It uses the
313  * mirror flag "buffer_disabled" so that it can be used in fast paths, such as by
314  * the irqsoff tracer. But it may be inaccurate due to races. If you
315  * need to know the accurate state, use tracing_is_on() which is a little
316  * slower, but accurate.
317  */
318 int tracing_is_enabled(void)
319 {
320 	/*
321 	 * For quick access (irqsoff uses this in fast path), just
322 	 * return the mirror variable of the state of the ring buffer.
323 	 * It's a little racy, but we don't really care.
324 	 */
325 	smp_rmb();
326 	return !global_trace.buffer_disabled;
327 }
328 
329 /*
330  * trace_buf_size is the size in bytes that is allocated
331  * for a buffer. Note, the number of bytes is always rounded
332  * to page size.
333  *
334  * This number is purposely set low (16384 entries).
335  * If a dump on oops happens, it will be much appreciated
336  * not to have to wait for all that output. In any case, this is
337  * configurable at both boot time and run time.
338  */
339 #define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
340 
341 static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
342 
343 /* trace_types holds a link list of available tracers. */
344 static struct tracer		*trace_types __read_mostly;
345 
346 /*
347  * trace_types_lock is used to protect the trace_types list.
348  */
349 DEFINE_MUTEX(trace_types_lock);
350 
351 /*
352  * serialize the access of the ring buffer
353  *
354  * The ring buffer serializes readers, but that is only low level protection.
355  * The validity of the events (returned by ring_buffer_peek() etc.)
356  * is not protected by the ring buffer.
357  *
358  * The content of events may become garbage if we allow other processes to
359  * consume these events concurrently:
360  *   A) the page of the consumed events may become a normal page
361  *      (not a reader page) in the ring buffer, and this page will be
362  *      rewritten by the event producer.
363  *   B) the page of the consumed events may become a page for splice_read,
364  *      and this page will be returned to the system.
365  *
366  * These primitives allow multiple processes to access different cpu ring
367  * buffers concurrently.
368  *
369  * These primitives don't distinguish read-only from read-consume access.
370  * Multiple read-only accesses are also serialized.
371  */
372 
373 #ifdef CONFIG_SMP
374 static DECLARE_RWSEM(all_cpu_access_lock);
375 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
376 
377 static inline void trace_access_lock(int cpu)
378 {
379 	if (cpu == RING_BUFFER_ALL_CPUS) {
380 		/* gain it for accessing the whole ring buffer. */
381 		down_write(&all_cpu_access_lock);
382 	} else {
383 		/* gain it for accessing a cpu ring buffer. */
384 
385 		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
386 		down_read(&all_cpu_access_lock);
387 
388 		/* Secondly block other access to this @cpu ring buffer. */
389 		mutex_lock(&per_cpu(cpu_access_lock, cpu));
390 	}
391 }
392 
393 static inline void trace_access_unlock(int cpu)
394 {
395 	if (cpu == RING_BUFFER_ALL_CPUS) {
396 		up_write(&all_cpu_access_lock);
397 	} else {
398 		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
399 		up_read(&all_cpu_access_lock);
400 	}
401 }
402 
403 static inline void trace_access_lock_init(void)
404 {
405 	int cpu;
406 
407 	for_each_possible_cpu(cpu)
408 		mutex_init(&per_cpu(cpu_access_lock, cpu));
409 }
410 
411 #else
412 
413 static DEFINE_MUTEX(access_lock);
414 
415 static inline void trace_access_lock(int cpu)
416 {
417 	(void)cpu;
418 	mutex_lock(&access_lock);
419 }
420 
421 static inline void trace_access_unlock(int cpu)
422 {
423 	(void)cpu;
424 	mutex_unlock(&access_lock);
425 }
426 
427 static inline void trace_access_lock_init(void)
428 {
429 }
430 
431 #endif
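
/*
 * Illustrative sketch of the reader-side pattern described above (the
 * function below is hypothetical): a per-cpu reader takes the lock for one
 * cpu, while a RING_BUFFER_ALL_CPUS reader excludes everybody else.
 */
#if 0
static void example_consume_cpu(int cpu)
{
	trace_access_lock(cpu);
	/* ... peek at or consume events from @cpu's ring buffer ... */
	trace_access_unlock(cpu);
}
#endif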
432 
433 /* trace_flags holds trace_options default values */
434 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
435 	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
436 	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
437 	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
438 
439 static void tracer_tracing_on(struct trace_array *tr)
440 {
441 	if (tr->trace_buffer.buffer)
442 		ring_buffer_record_on(tr->trace_buffer.buffer);
443 	/*
444 	 * This flag is looked at when buffers haven't been allocated
445 	 * yet, or by some tracers (like irqsoff), that just want to
446 	 * know if the ring buffer has been disabled, but it can handle
447 	 * races of where it gets disabled but we still do a record.
448 	 * As the check is in the fast path of the tracers, it is more
449 	 * important to be fast than accurate.
450 	 */
451 	tr->buffer_disabled = 0;
452 	/* Make the flag seen by readers */
453 	smp_wmb();
454 }
455 
456 /**
457  * tracing_on - enable tracing buffers
458  *
459  * This function enables tracing buffers that may have been
460  * disabled with tracing_off.
461  */
462 void tracing_on(void)
463 {
464 	tracer_tracing_on(&global_trace);
465 }
466 EXPORT_SYMBOL_GPL(tracing_on);
467 
468 /**
469  * __trace_puts - write a constant string into the trace buffer.
470  * @ip:	   The address of the caller
471  * @str:   The constant string to write
472  * @size:  The size of the string.
473  */
474 int __trace_puts(unsigned long ip, const char *str, int size)
475 {
476 	struct ring_buffer_event *event;
477 	struct ring_buffer *buffer;
478 	struct print_entry *entry;
479 	unsigned long irq_flags;
480 	int alloc;
481 	int pc;
482 
483 	if (!(trace_flags & TRACE_ITER_PRINTK))
484 		return 0;
485 
486 	pc = preempt_count();
487 
488 	if (unlikely(tracing_selftest_running || tracing_disabled))
489 		return 0;
490 
491 	alloc = sizeof(*entry) + size + 2; /* possible \n added */
492 
493 	local_save_flags(irq_flags);
494 	buffer = global_trace.trace_buffer.buffer;
495 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
496 					  irq_flags, pc);
497 	if (!event)
498 		return 0;
499 
500 	entry = ring_buffer_event_data(event);
501 	entry->ip = ip;
502 
503 	memcpy(&entry->buf, str, size);
504 
505 	/* Add a newline if necessary */
506 	if (entry->buf[size - 1] != '\n') {
507 		entry->buf[size] = '\n';
508 		entry->buf[size + 1] = '\0';
509 	} else
510 		entry->buf[size] = '\0';
511 
512 	__buffer_unlock_commit(buffer, event);
513 	ftrace_trace_stack(buffer, irq_flags, 4, pc);
514 
515 	return size;
516 }
517 EXPORT_SYMBOL_GPL(__trace_puts);
518 
519 /**
520  * __trace_bputs - write the pointer to a constant string into trace buffer
521  * @ip:	   The address of the caller
522  * @str:   The constant string to write to the buffer
523  */
524 int __trace_bputs(unsigned long ip, const char *str)
525 {
526 	struct ring_buffer_event *event;
527 	struct ring_buffer *buffer;
528 	struct bputs_entry *entry;
529 	unsigned long irq_flags;
530 	int size = sizeof(struct bputs_entry);
531 	int pc;
532 
533 	if (!(trace_flags & TRACE_ITER_PRINTK))
534 		return 0;
535 
536 	pc = preempt_count();
537 
538 	if (unlikely(tracing_selftest_running || tracing_disabled))
539 		return 0;
540 
541 	local_save_flags(irq_flags);
542 	buffer = global_trace.trace_buffer.buffer;
543 	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
544 					  irq_flags, pc);
545 	if (!event)
546 		return 0;
547 
548 	entry = ring_buffer_event_data(event);
549 	entry->ip			= ip;
550 	entry->str			= str;
551 
552 	__buffer_unlock_commit(buffer, event);
553 	ftrace_trace_stack(buffer, irq_flags, 4, pc);
554 
555 	return 1;
556 }
557 EXPORT_SYMBOL_GPL(__trace_bputs);
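
/*
 * Usage sketch (hypothetical caller): __trace_puts() and __trace_bputs()
 * are the helpers behind the trace_puts() macro, which is the usual way
 * to drop a fixed string into the trace buffer.
 */
#if 0
static void example_mark_point(void)
{
	/* A string literal lets trace_puts() resolve to __trace_bputs() */
	trace_puts("reached the interesting point\n");
}
#endif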
558 
559 #ifdef CONFIG_TRACER_SNAPSHOT
560 /**
561  * tracing_snapshot - take a snapshot of the current buffer.
562  *
563  * This causes a swap between the snapshot buffer and the current live
564  * tracing buffer. You can use this to take snapshots of the live
565  * trace when some condition is triggered, but continue to trace.
566  *
567  * Note, make sure to allocate the snapshot either with
568  * tracing_snapshot_alloc(), or by doing it manually
569  * with: echo 1 > /sys/kernel/debug/tracing/snapshot
570  *
571  * If the snapshot buffer is not allocated, it will stop tracing.
572  * Basically making a permanent snapshot.
573  */
574 void tracing_snapshot(void)
575 {
576 	struct trace_array *tr = &global_trace;
577 	struct tracer *tracer = tr->current_trace;
578 	unsigned long flags;
579 
580 	if (in_nmi()) {
581 		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
582 		internal_trace_puts("*** snapshot is being ignored        ***\n");
583 		return;
584 	}
585 
586 	if (!tr->allocated_snapshot) {
587 		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
588 		internal_trace_puts("*** stopping trace here!   ***\n");
589 		tracing_off();
590 		return;
591 	}
592 
593 	/* Note, snapshot can not be used when the tracer uses it */
594 	if (tracer->use_max_tr) {
595 		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
596 		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
597 		return;
598 	}
599 
600 	local_irq_save(flags);
601 	update_max_tr(tr, current, smp_processor_id());
602 	local_irq_restore(flags);
603 }
604 EXPORT_SYMBOL_GPL(tracing_snapshot);
605 
606 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
607 					struct trace_buffer *size_buf, int cpu_id);
608 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
609 
610 static int alloc_snapshot(struct trace_array *tr)
611 {
612 	int ret;
613 
614 	if (!tr->allocated_snapshot) {
615 
616 		/* allocate spare buffer */
617 		ret = resize_buffer_duplicate_size(&tr->max_buffer,
618 				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
619 		if (ret < 0)
620 			return ret;
621 
622 		tr->allocated_snapshot = true;
623 	}
624 
625 	return 0;
626 }
627 
628 static void free_snapshot(struct trace_array *tr)
629 {
630 	/*
631 	 * We don't free the ring buffer, instead we resize it because
632 	 * the max_tr ring buffer has some state (e.g. ring->clock) and
633 	 * we want to preserve it.
634 	 */
635 	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
636 	set_buffer_entries(&tr->max_buffer, 1);
637 	tracing_reset_online_cpus(&tr->max_buffer);
638 	tr->allocated_snapshot = false;
639 }
640 
641 /**
642  * tracing_alloc_snapshot - allocate snapshot buffer.
643  *
644  * This only allocates the snapshot buffer if it isn't already
645  * allocated - it doesn't also take a snapshot.
646  *
647  * This is meant to be used in cases where the snapshot buffer needs
648  * to be set up for events that can't sleep but need to be able to
649  * trigger a snapshot.
650  */
651 int tracing_alloc_snapshot(void)
652 {
653 	struct trace_array *tr = &global_trace;
654 	int ret;
655 
656 	ret = alloc_snapshot(tr);
657 	WARN_ON(ret < 0);
658 
659 	return ret;
660 }
661 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
662 
663 /**
664  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
665  *
666  * This is similar to tracing_snapshot(), but it will allocate the
667  * snapshot buffer if it isn't already allocated. Use this only
668  * where it is safe to sleep, as the allocation may sleep.
669  *
670  * This causes a swap between the snapshot buffer and the current live
671  * tracing buffer. You can use this to take snapshots of the live
672  * trace when some condition is triggered, but continue to trace.
673  */
674 void tracing_snapshot_alloc(void)
675 {
676 	int ret;
677 
678 	ret = tracing_alloc_snapshot();
679 	if (ret < 0)
680 		return;
681 
682 	tracing_snapshot();
683 }
684 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
685 #else
686 void tracing_snapshot(void)
687 {
688 	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
689 }
690 EXPORT_SYMBOL_GPL(tracing_snapshot);
691 int tracing_alloc_snapshot(void)
692 {
693 	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
694 	return -ENODEV;
695 }
696 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
697 void tracing_snapshot_alloc(void)
698 {
699 	/* Give warning */
700 	tracing_snapshot();
701 }
702 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
703 #endif /* CONFIG_TRACER_SNAPSHOT */
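
/*
 * Usage sketch for the snapshot API above (hypothetical callers): allocate
 * the spare buffer once from a context that may sleep, then take snapshots
 * when the condition of interest fires.
 */
#if 0
static int example_snapshot_setup(void)
{
	return tracing_alloc_snapshot();	/* may sleep */
}

static void example_hit_condition(void)
{
	tracing_snapshot();	/* swap the live buffer with the spare one */
}
#endif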
704 
705 static void tracer_tracing_off(struct trace_array *tr)
706 {
707 	if (tr->trace_buffer.buffer)
708 		ring_buffer_record_off(tr->trace_buffer.buffer);
709 	/*
710 	 * This flag is looked at when buffers haven't been allocated
711 	 * yet, or by some tracers (like irqsoff), that just want to
712 	 * know if the ring buffer has been disabled, but it can handle
713 	 * races of where it gets disabled but we still do a record.
714 	 * As the check is in the fast path of the tracers, it is more
715 	 * important to be fast than accurate.
716 	 */
717 	tr->buffer_disabled = 1;
718 	/* Make the flag seen by readers */
719 	smp_wmb();
720 }
721 
722 /**
723  * tracing_off - turn off tracing buffers
724  *
725  * This function stops the tracing buffers from recording data.
726  * It does not disable any overhead the tracers themselves may
727  * be causing. This function simply causes all recording to
728  * the ring buffers to fail.
729  */
730 void tracing_off(void)
731 {
732 	tracer_tracing_off(&global_trace);
733 }
734 EXPORT_SYMBOL_GPL(tracing_off);
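
/*
 * Usage sketch (hypothetical caller): tracing_off() is typically used to
 * freeze the buffers when a bad condition is detected, so the events that
 * led up to it are not overwritten.
 */
#if 0
static void example_check_state(bool broken)
{
	if (broken) {
		trace_printk("state is broken, freezing the trace\n");
		tracing_off();
	}
}
#endif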
735 
736 void disable_trace_on_warning(void)
737 {
738 	if (__disable_trace_on_warning)
739 		tracing_off();
740 }
741 
742 /**
743  * tracer_tracing_is_on - show real state of ring buffer enabled
744  * @tr: the trace array to check if its ring buffer is enabled
745  *
746  * Shows the real state of the ring buffer, whether it is enabled or not.
747  */
748 static int tracer_tracing_is_on(struct trace_array *tr)
749 {
750 	if (tr->trace_buffer.buffer)
751 		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
752 	return !tr->buffer_disabled;
753 }
754 
755 /**
756  * tracing_is_on - show state of ring buffers enabled
757  */
758 int tracing_is_on(void)
759 {
760 	return tracer_tracing_is_on(&global_trace);
761 }
762 EXPORT_SYMBOL_GPL(tracing_is_on);
763 
764 static int __init set_buf_size(char *str)
765 {
766 	unsigned long buf_size;
767 
768 	if (!str)
769 		return 0;
770 	buf_size = memparse(str, &str);
771 	/* nr_entries can not be zero */
772 	if (buf_size == 0)
773 		return 0;
774 	trace_buf_size = buf_size;
775 	return 1;
776 }
777 __setup("trace_buf_size=", set_buf_size);
778 
779 static int __init set_tracing_thresh(char *str)
780 {
781 	unsigned long threshold;
782 	int ret;
783 
784 	if (!str)
785 		return 0;
786 	ret = kstrtoul(str, 0, &threshold);
787 	if (ret < 0)
788 		return 0;
789 	tracing_thresh = threshold * 1000;
790 	return 1;
791 }
792 __setup("tracing_thresh=", set_tracing_thresh);
793 
794 unsigned long nsecs_to_usecs(unsigned long nsecs)
795 {
796 	return nsecs / 1000;
797 }
798 
799 /* These must match the bit positions in trace_iterator_flags */
800 static const char *trace_options[] = {
801 	"print-parent",
802 	"sym-offset",
803 	"sym-addr",
804 	"verbose",
805 	"raw",
806 	"hex",
807 	"bin",
808 	"block",
809 	"stacktrace",
810 	"trace_printk",
811 	"ftrace_preempt",
812 	"branch",
813 	"annotate",
814 	"userstacktrace",
815 	"sym-userobj",
816 	"printk-msg-only",
817 	"context-info",
818 	"latency-format",
819 	"sleep-time",
820 	"graph-time",
821 	"record-cmd",
822 	"overwrite",
823 	"disable_on_free",
824 	"irq-info",
825 	"markers",
826 	"function-trace",
827 	NULL
828 };
829 
830 static struct {
831 	u64 (*func)(void);
832 	const char *name;
833 	int in_ns;		/* is this clock in nanoseconds? */
834 } trace_clocks[] = {
835 	{ trace_clock_local,		"local",	1 },
836 	{ trace_clock_global,		"global",	1 },
837 	{ trace_clock_counter,		"counter",	0 },
838 	{ trace_clock_jiffies,		"uptime",	0 },
839 	{ trace_clock,			"perf",		1 },
840 	{ ktime_get_mono_fast_ns,	"mono",		1 },
841 	ARCH_TRACE_CLOCKS
842 };
843 
844 /*
845  * trace_parser_get_init - gets the buffer for trace parser
846  */
847 int trace_parser_get_init(struct trace_parser *parser, int size)
848 {
849 	memset(parser, 0, sizeof(*parser));
850 
851 	parser->buffer = kmalloc(size, GFP_KERNEL);
852 	if (!parser->buffer)
853 		return 1;
854 
855 	parser->size = size;
856 	return 0;
857 }
858 
859 /*
860  * trace_parser_put - frees the buffer for trace parser
861  */
862 void trace_parser_put(struct trace_parser *parser)
863 {
864 	kfree(parser->buffer);
865 }
866 
867 /*
868  * trace_get_user - reads the user input string separated by space
869  * (matched by isspace(ch))
870  *
871  * For each string found the 'struct trace_parser' is updated,
872  * and the function returns.
873  *
874  * Returns number of bytes read.
875  *
876  * See kernel/trace/trace.h for 'struct trace_parser' details.
877  */
878 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
879 	size_t cnt, loff_t *ppos)
880 {
881 	char ch;
882 	size_t read = 0;
883 	ssize_t ret;
884 
885 	if (!*ppos)
886 		trace_parser_clear(parser);
887 
888 	ret = get_user(ch, ubuf++);
889 	if (ret)
890 		goto out;
891 
892 	read++;
893 	cnt--;
894 
895 	/*
896 	 * The parser is not finished with the last write,
897 	 * continue reading the user input without skipping spaces.
898 	 */
899 	if (!parser->cont) {
900 		/* skip white space */
901 		while (cnt && isspace(ch)) {
902 			ret = get_user(ch, ubuf++);
903 			if (ret)
904 				goto out;
905 			read++;
906 			cnt--;
907 		}
908 
909 		/* only spaces were written */
910 		if (isspace(ch)) {
911 			*ppos += read;
912 			ret = read;
913 			goto out;
914 		}
915 
916 		parser->idx = 0;
917 	}
918 
919 	/* read the non-space input */
920 	while (cnt && !isspace(ch)) {
921 		if (parser->idx < parser->size - 1)
922 			parser->buffer[parser->idx++] = ch;
923 		else {
924 			ret = -EINVAL;
925 			goto out;
926 		}
927 		ret = get_user(ch, ubuf++);
928 		if (ret)
929 			goto out;
930 		read++;
931 		cnt--;
932 	}
933 
934 	/* We either got finished input or we have to wait for another call. */
935 	if (isspace(ch)) {
936 		parser->buffer[parser->idx] = 0;
937 		parser->cont = false;
938 	} else if (parser->idx < parser->size - 1) {
939 		parser->cont = true;
940 		parser->buffer[parser->idx++] = ch;
941 	} else {
942 		ret = -EINVAL;
943 		goto out;
944 	}
945 
946 	*ppos += read;
947 	ret = read;
948 
949 out:
950 	return ret;
951 }
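
/*
 * Usage sketch of the parser API above, modelled on the ftrace file write
 * handlers (the handler below is hypothetical): read one whitespace
 * separated token per call and act on it once it is complete.
 */
#if 0
static ssize_t
example_write(struct file *filp, const char __user *ubuf,
	      size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, 128))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read >= 0 && trace_parser_loaded(&parser)) {
		/* parser.buffer now holds one NUL terminated token */
		/* ... act on parser.buffer ... */
	}

	trace_parser_put(&parser);
	return read;
}
#endif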
952 
953 /* TODO add a seq_buf_to_buffer() */
954 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
955 {
956 	int len;
957 
958 	if (trace_seq_used(s) <= s->seq.readpos)
959 		return -EBUSY;
960 
961 	len = trace_seq_used(s) - s->seq.readpos;
962 	if (cnt > len)
963 		cnt = len;
964 	memcpy(buf, s->buffer + s->seq.readpos, cnt);
965 
966 	s->seq.readpos += cnt;
967 	return cnt;
968 }
969 
970 unsigned long __read_mostly	tracing_thresh;
971 
972 #ifdef CONFIG_TRACER_MAX_TRACE
973 /*
974  * Copy the new maximum trace into the separate maximum-trace
975  * structure. (this way the maximum trace is permanently saved,
976  * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
977  */
978 static void
979 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
980 {
981 	struct trace_buffer *trace_buf = &tr->trace_buffer;
982 	struct trace_buffer *max_buf = &tr->max_buffer;
983 	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
984 	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
985 
986 	max_buf->cpu = cpu;
987 	max_buf->time_start = data->preempt_timestamp;
988 
989 	max_data->saved_latency = tr->max_latency;
990 	max_data->critical_start = data->critical_start;
991 	max_data->critical_end = data->critical_end;
992 
993 	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
994 	max_data->pid = tsk->pid;
995 	/*
996 	 * If tsk == current, then use current_uid(), as that does not use
997 	 * RCU. The irq tracer can be called out of RCU scope.
998 	 */
999 	if (tsk == current)
1000 		max_data->uid = current_uid();
1001 	else
1002 		max_data->uid = task_uid(tsk);
1003 
1004 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1005 	max_data->policy = tsk->policy;
1006 	max_data->rt_priority = tsk->rt_priority;
1007 
1008 	/* record this task's comm */
1009 	tracing_record_cmdline(tsk);
1010 }
1011 
1012 /**
1013  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1014  * @tr: tracer
1015  * @tsk: the task with the latency
1016  * @cpu: The cpu that initiated the trace.
1017  *
1018  * Flip the buffers between the @tr and the max_tr and record information
1019  * about which task was the cause of this latency.
1020  */
1021 void
1022 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1023 {
1024 	struct ring_buffer *buf;
1025 
1026 	if (tr->stop_count)
1027 		return;
1028 
1029 	WARN_ON_ONCE(!irqs_disabled());
1030 
1031 	if (!tr->allocated_snapshot) {
1032 		/* Only the nop tracer should hit this when disabling */
1033 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1034 		return;
1035 	}
1036 
1037 	arch_spin_lock(&tr->max_lock);
1038 
1039 	buf = tr->trace_buffer.buffer;
1040 	tr->trace_buffer.buffer = tr->max_buffer.buffer;
1041 	tr->max_buffer.buffer = buf;
1042 
1043 	__update_max_tr(tr, tsk, cpu);
1044 	arch_spin_unlock(&tr->max_lock);
1045 }
1046 
1047 /**
1048  * update_max_tr_single - only copy one trace over, and reset the rest
1049  * @tr: tracer
1050  * @tsk: task with the latency
1051  * @cpu: the cpu of the buffer to copy.
1052  *
1053  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1054  */
1055 void
1056 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1057 {
1058 	int ret;
1059 
1060 	if (tr->stop_count)
1061 		return;
1062 
1063 	WARN_ON_ONCE(!irqs_disabled());
1064 	if (!tr->allocated_snapshot) {
1065 		/* Only the nop tracer should hit this when disabling */
1066 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1067 		return;
1068 	}
1069 
1070 	arch_spin_lock(&tr->max_lock);
1071 
1072 	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1073 
1074 	if (ret == -EBUSY) {
1075 		/*
1076 		 * We failed to swap the buffer due to a commit taking
1077 		 * place on this CPU. We fail to record, but we reset
1078 		 * the max trace buffer (no one writes directly to it)
1079 		 * and flag that it failed.
1080 		 */
1081 		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1082 			"Failed to swap buffers due to commit in progress\n");
1083 	}
1084 
1085 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1086 
1087 	__update_max_tr(tr, tsk, cpu);
1088 	arch_spin_unlock(&tr->max_lock);
1089 }
1090 #endif /* CONFIG_TRACER_MAX_TRACE */
1091 
1092 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1093 {
1094 	/* Iterators are static, they should be filled or empty */
1095 	if (trace_buffer_iter(iter, iter->cpu_file))
1096 		return 0;
1097 
1098 	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1099 				full);
1100 }
1101 
1102 #ifdef CONFIG_FTRACE_STARTUP_TEST
1103 static int run_tracer_selftest(struct tracer *type)
1104 {
1105 	struct trace_array *tr = &global_trace;
1106 	struct tracer *saved_tracer = tr->current_trace;
1107 	int ret;
1108 
1109 	if (!type->selftest || tracing_selftest_disabled)
1110 		return 0;
1111 
1112 	/*
1113 	 * Run a selftest on this tracer.
1114 	 * Here we reset the trace buffer, and set the current
1115 	 * tracer to be this tracer. The tracer can then run some
1116 	 * internal tracing to verify that everything is in order.
1117 	 * If we fail, we do not register this tracer.
1118 	 */
1119 	tracing_reset_online_cpus(&tr->trace_buffer);
1120 
1121 	tr->current_trace = type;
1122 
1123 #ifdef CONFIG_TRACER_MAX_TRACE
1124 	if (type->use_max_tr) {
1125 		/* If we expanded the buffers, make sure the max is expanded too */
1126 		if (ring_buffer_expanded)
1127 			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1128 					   RING_BUFFER_ALL_CPUS);
1129 		tr->allocated_snapshot = true;
1130 	}
1131 #endif
1132 
1133 	/* the test is responsible for initializing and enabling */
1134 	pr_info("Testing tracer %s: ", type->name);
1135 	ret = type->selftest(type, tr);
1136 	/* the test is responsible for resetting too */
1137 	tr->current_trace = saved_tracer;
1138 	if (ret) {
1139 		printk(KERN_CONT "FAILED!\n");
1140 		/* Add the warning after printing 'FAILED' */
1141 		WARN_ON(1);
1142 		return -1;
1143 	}
1144 	/* Only reset on passing, to avoid touching corrupted buffers */
1145 	tracing_reset_online_cpus(&tr->trace_buffer);
1146 
1147 #ifdef CONFIG_TRACER_MAX_TRACE
1148 	if (type->use_max_tr) {
1149 		tr->allocated_snapshot = false;
1150 
1151 		/* Shrink the max buffer again */
1152 		if (ring_buffer_expanded)
1153 			ring_buffer_resize(tr->max_buffer.buffer, 1,
1154 					   RING_BUFFER_ALL_CPUS);
1155 	}
1156 #endif
1157 
1158 	printk(KERN_CONT "PASSED\n");
1159 	return 0;
1160 }
1161 #else
1162 static inline int run_tracer_selftest(struct tracer *type)
1163 {
1164 	return 0;
1165 }
1166 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1167 
1168 /**
1169  * register_tracer - register a tracer with the ftrace system.
1170  * @type: the plugin for the tracer
1171  *
1172  * Register a new plugin tracer.
1173  */
1174 int register_tracer(struct tracer *type)
1175 {
1176 	struct tracer *t;
1177 	int ret = 0;
1178 
1179 	if (!type->name) {
1180 		pr_info("Tracer must have a name\n");
1181 		return -1;
1182 	}
1183 
1184 	if (strlen(type->name) >= MAX_TRACER_SIZE) {
1185 		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1186 		return -1;
1187 	}
1188 
1189 	mutex_lock(&trace_types_lock);
1190 
1191 	tracing_selftest_running = true;
1192 
1193 	for (t = trace_types; t; t = t->next) {
1194 		if (strcmp(type->name, t->name) == 0) {
1195 			/* already found */
1196 			pr_info("Tracer %s already registered\n",
1197 				type->name);
1198 			ret = -1;
1199 			goto out;
1200 		}
1201 	}
1202 
1203 	if (!type->set_flag)
1204 		type->set_flag = &dummy_set_flag;
1205 	if (!type->flags)
1206 		type->flags = &dummy_tracer_flags;
1207 	else
1208 		if (!type->flags->opts)
1209 			type->flags->opts = dummy_tracer_opt;
1210 
1211 	ret = run_tracer_selftest(type);
1212 	if (ret < 0)
1213 		goto out;
1214 
1215 	type->next = trace_types;
1216 	trace_types = type;
1217 
1218  out:
1219 	tracing_selftest_running = false;
1220 	mutex_unlock(&trace_types_lock);
1221 
1222 	if (ret || !default_bootup_tracer)
1223 		goto out_unlock;
1224 
1225 	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1226 		goto out_unlock;
1227 
1228 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1229 	/* Do we want this tracer to start on bootup? */
1230 	tracing_set_tracer(&global_trace, type->name);
1231 	default_bootup_tracer = NULL;
1232 	/* disable other selftests, since this will break them. */
1233 	tracing_selftest_disabled = true;
1234 #ifdef CONFIG_FTRACE_STARTUP_TEST
1235 	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1236 	       type->name);
1237 #endif
1238 
1239  out_unlock:
1240 	return ret;
1241 }
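
/*
 * Sketch of a minimal plugin tracer registration (all names hypothetical;
 * real tracers usually also provide start/stop, output hooks and a
 * selftest).
 */
#if 0
static int example_tracer_init(struct trace_array *tr)
{
	/* arm whatever the tracer needs here */
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
	/* undo example_tracer_init() */
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int init_example_tracer(void)
{
	return register_tracer(&example_tracer);
}
core_initcall(init_example_tracer);
#endif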
1242 
1243 void tracing_reset(struct trace_buffer *buf, int cpu)
1244 {
1245 	struct ring_buffer *buffer = buf->buffer;
1246 
1247 	if (!buffer)
1248 		return;
1249 
1250 	ring_buffer_record_disable(buffer);
1251 
1252 	/* Make sure all commits have finished */
1253 	synchronize_sched();
1254 	ring_buffer_reset_cpu(buffer, cpu);
1255 
1256 	ring_buffer_record_enable(buffer);
1257 }
1258 
1259 void tracing_reset_online_cpus(struct trace_buffer *buf)
1260 {
1261 	struct ring_buffer *buffer = buf->buffer;
1262 	int cpu;
1263 
1264 	if (!buffer)
1265 		return;
1266 
1267 	ring_buffer_record_disable(buffer);
1268 
1269 	/* Make sure all commits have finished */
1270 	synchronize_sched();
1271 
1272 	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1273 
1274 	for_each_online_cpu(cpu)
1275 		ring_buffer_reset_cpu(buffer, cpu);
1276 
1277 	ring_buffer_record_enable(buffer);
1278 }
1279 
1280 /* Must have trace_types_lock held */
1281 void tracing_reset_all_online_cpus(void)
1282 {
1283 	struct trace_array *tr;
1284 
1285 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1286 		tracing_reset_online_cpus(&tr->trace_buffer);
1287 #ifdef CONFIG_TRACER_MAX_TRACE
1288 		tracing_reset_online_cpus(&tr->max_buffer);
1289 #endif
1290 	}
1291 }
1292 
1293 #define SAVED_CMDLINES_DEFAULT 128
1294 #define NO_CMDLINE_MAP UINT_MAX
1295 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1296 struct saved_cmdlines_buffer {
1297 	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1298 	unsigned *map_cmdline_to_pid;
1299 	unsigned cmdline_num;
1300 	int cmdline_idx;
1301 	char *saved_cmdlines;
1302 };
1303 static struct saved_cmdlines_buffer *savedcmd;
1304 
1305 /* temporary disable recording */
1306 static atomic_t trace_record_cmdline_disabled __read_mostly;
1307 
1308 static inline char *get_saved_cmdlines(int idx)
1309 {
1310 	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1311 }
1312 
1313 static inline void set_cmdline(int idx, const char *cmdline)
1314 {
1315 	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1316 }
1317 
1318 static int allocate_cmdlines_buffer(unsigned int val,
1319 				    struct saved_cmdlines_buffer *s)
1320 {
1321 	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1322 					GFP_KERNEL);
1323 	if (!s->map_cmdline_to_pid)
1324 		return -ENOMEM;
1325 
1326 	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1327 	if (!s->saved_cmdlines) {
1328 		kfree(s->map_cmdline_to_pid);
1329 		return -ENOMEM;
1330 	}
1331 
1332 	s->cmdline_idx = 0;
1333 	s->cmdline_num = val;
1334 	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1335 	       sizeof(s->map_pid_to_cmdline));
1336 	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1337 	       val * sizeof(*s->map_cmdline_to_pid));
1338 
1339 	return 0;
1340 }
1341 
1342 static int trace_create_savedcmd(void)
1343 {
1344 	int ret;
1345 
1346 	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1347 	if (!savedcmd)
1348 		return -ENOMEM;
1349 
1350 	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1351 	if (ret < 0) {
1352 		kfree(savedcmd);
1353 		savedcmd = NULL;
1354 		return -ENOMEM;
1355 	}
1356 
1357 	return 0;
1358 }
1359 
1360 int is_tracing_stopped(void)
1361 {
1362 	return global_trace.stop_count;
1363 }
1364 
1365 /**
1366  * tracing_start - quick start of the tracer
1367  *
1368  * If tracing is enabled but was stopped by tracing_stop,
1369  * this will start the tracer back up.
1370  */
1371 void tracing_start(void)
1372 {
1373 	struct ring_buffer *buffer;
1374 	unsigned long flags;
1375 
1376 	if (tracing_disabled)
1377 		return;
1378 
1379 	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1380 	if (--global_trace.stop_count) {
1381 		if (global_trace.stop_count < 0) {
1382 			/* Someone screwed up their debugging */
1383 			WARN_ON_ONCE(1);
1384 			global_trace.stop_count = 0;
1385 		}
1386 		goto out;
1387 	}
1388 
1389 	/* Prevent the buffers from switching */
1390 	arch_spin_lock(&global_trace.max_lock);
1391 
1392 	buffer = global_trace.trace_buffer.buffer;
1393 	if (buffer)
1394 		ring_buffer_record_enable(buffer);
1395 
1396 #ifdef CONFIG_TRACER_MAX_TRACE
1397 	buffer = global_trace.max_buffer.buffer;
1398 	if (buffer)
1399 		ring_buffer_record_enable(buffer);
1400 #endif
1401 
1402 	arch_spin_unlock(&global_trace.max_lock);
1403 
1404  out:
1405 	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1406 }
1407 
1408 static void tracing_start_tr(struct trace_array *tr)
1409 {
1410 	struct ring_buffer *buffer;
1411 	unsigned long flags;
1412 
1413 	if (tracing_disabled)
1414 		return;
1415 
1416 	/* If global, we need to also start the max tracer */
1417 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1418 		return tracing_start();
1419 
1420 	raw_spin_lock_irqsave(&tr->start_lock, flags);
1421 
1422 	if (--tr->stop_count) {
1423 		if (tr->stop_count < 0) {
1424 			/* Someone screwed up their debugging */
1425 			WARN_ON_ONCE(1);
1426 			tr->stop_count = 0;
1427 		}
1428 		goto out;
1429 	}
1430 
1431 	buffer = tr->trace_buffer.buffer;
1432 	if (buffer)
1433 		ring_buffer_record_enable(buffer);
1434 
1435  out:
1436 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1437 }
1438 
1439 /**
1440  * tracing_stop - quick stop of the tracer
1441  *
1442  * Lightweight way to stop tracing. Use in conjunction with
1443  * tracing_start.
1444  */
1445 void tracing_stop(void)
1446 {
1447 	struct ring_buffer *buffer;
1448 	unsigned long flags;
1449 
1450 	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1451 	if (global_trace.stop_count++)
1452 		goto out;
1453 
1454 	/* Prevent the buffers from switching */
1455 	arch_spin_lock(&global_trace.max_lock);
1456 
1457 	buffer = global_trace.trace_buffer.buffer;
1458 	if (buffer)
1459 		ring_buffer_record_disable(buffer);
1460 
1461 #ifdef CONFIG_TRACER_MAX_TRACE
1462 	buffer = global_trace.max_buffer.buffer;
1463 	if (buffer)
1464 		ring_buffer_record_disable(buffer);
1465 #endif
1466 
1467 	arch_spin_unlock(&global_trace.max_lock);
1468 
1469  out:
1470 	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1471 }
1472 
1473 static void tracing_stop_tr(struct trace_array *tr)
1474 {
1475 	struct ring_buffer *buffer;
1476 	unsigned long flags;
1477 
1478 	/* If global, we need to also stop the max tracer */
1479 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1480 		return tracing_stop();
1481 
1482 	raw_spin_lock_irqsave(&tr->start_lock, flags);
1483 	if (tr->stop_count++)
1484 		goto out;
1485 
1486 	buffer = tr->trace_buffer.buffer;
1487 	if (buffer)
1488 		ring_buffer_record_disable(buffer);
1489 
1490  out:
1491 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1492 }
1493 
1494 void trace_stop_cmdline_recording(void);
1495 
1496 static int trace_save_cmdline(struct task_struct *tsk)
1497 {
1498 	unsigned pid, idx;
1499 
1500 	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1501 		return 0;
1502 
1503 	/*
1504 	 * It's not the end of the world if we don't get
1505 	 * the lock, but we also don't want to spin
1506 	 * nor do we want to disable interrupts,
1507 	 * so if we miss here, then better luck next time.
1508 	 */
1509 	if (!arch_spin_trylock(&trace_cmdline_lock))
1510 		return 0;
1511 
1512 	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1513 	if (idx == NO_CMDLINE_MAP) {
1514 		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1515 
1516 		/*
1517 		 * Check whether the cmdline buffer at idx has a pid
1518 		 * mapped. We are going to overwrite that entry so we
1519 		 * need to clear the map_pid_to_cmdline. Otherwise we
1520 		 * would read the new comm for the old pid.
1521 		 */
1522 		pid = savedcmd->map_cmdline_to_pid[idx];
1523 		if (pid != NO_CMDLINE_MAP)
1524 			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1525 
1526 		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1527 		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1528 
1529 		savedcmd->cmdline_idx = idx;
1530 	}
1531 
1532 	set_cmdline(idx, tsk->comm);
1533 
1534 	arch_spin_unlock(&trace_cmdline_lock);
1535 
1536 	return 1;
1537 }
1538 
1539 static void __trace_find_cmdline(int pid, char comm[])
1540 {
1541 	unsigned map;
1542 
1543 	if (!pid) {
1544 		strcpy(comm, "<idle>");
1545 		return;
1546 	}
1547 
1548 	if (WARN_ON_ONCE(pid < 0)) {
1549 		strcpy(comm, "<XXX>");
1550 		return;
1551 	}
1552 
1553 	if (pid > PID_MAX_DEFAULT) {
1554 		strcpy(comm, "<...>");
1555 		return;
1556 	}
1557 
1558 	map = savedcmd->map_pid_to_cmdline[pid];
1559 	if (map != NO_CMDLINE_MAP)
1560 		strcpy(comm, get_saved_cmdlines(map));
1561 	else
1562 		strcpy(comm, "<...>");
1563 }
1564 
1565 void trace_find_cmdline(int pid, char comm[])
1566 {
1567 	preempt_disable();
1568 	arch_spin_lock(&trace_cmdline_lock);
1569 
1570 	__trace_find_cmdline(pid, comm);
1571 
1572 	arch_spin_unlock(&trace_cmdline_lock);
1573 	preempt_enable();
1574 }
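
/*
 * Usage sketch (hypothetical caller): output code resolves a recorded pid
 * back to a command name with trace_find_cmdline(); pids that were never
 * recorded come back as "<...>".
 */
#if 0
static void example_print_task(struct trace_seq *s, int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	trace_seq_printf(s, "%s-%d", comm, pid);
}
#endif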
1575 
1576 void tracing_record_cmdline(struct task_struct *tsk)
1577 {
1578 	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1579 		return;
1580 
1581 	if (!__this_cpu_read(trace_cmdline_save))
1582 		return;
1583 
1584 	if (trace_save_cmdline(tsk))
1585 		__this_cpu_write(trace_cmdline_save, false);
1586 }
1587 
1588 void
1589 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1590 			     int pc)
1591 {
1592 	struct task_struct *tsk = current;
1593 
1594 	entry->preempt_count		= pc & 0xff;
1595 	entry->pid			= (tsk) ? tsk->pid : 0;
1596 	entry->flags =
1597 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1598 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1599 #else
1600 		TRACE_FLAG_IRQS_NOSUPPORT |
1601 #endif
1602 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1603 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1604 		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1605 		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1606 }
1607 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1608 
1609 struct ring_buffer_event *
1610 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1611 			  int type,
1612 			  unsigned long len,
1613 			  unsigned long flags, int pc)
1614 {
1615 	struct ring_buffer_event *event;
1616 
1617 	event = ring_buffer_lock_reserve(buffer, len);
1618 	if (event != NULL) {
1619 		struct trace_entry *ent = ring_buffer_event_data(event);
1620 
1621 		tracing_generic_entry_update(ent, flags, pc);
1622 		ent->type = type;
1623 	}
1624 
1625 	return event;
1626 }
1627 
1628 void
1629 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1630 {
1631 	__this_cpu_write(trace_cmdline_save, true);
1632 	ring_buffer_unlock_commit(buffer, event);
1633 }
1634 
1635 static inline void
1636 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
1637 			     struct ring_buffer_event *event,
1638 			     unsigned long flags, int pc)
1639 {
1640 	__buffer_unlock_commit(buffer, event);
1641 
1642 	ftrace_trace_stack(buffer, flags, 6, pc);
1643 	ftrace_trace_userstack(buffer, flags, pc);
1644 }
1645 
1646 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1647 				struct ring_buffer_event *event,
1648 				unsigned long flags, int pc)
1649 {
1650 	__trace_buffer_unlock_commit(buffer, event, flags, pc);
1651 }
1652 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1653 
1654 static struct ring_buffer *temp_buffer;
1655 
1656 struct ring_buffer_event *
1657 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1658 			  struct ftrace_event_file *ftrace_file,
1659 			  int type, unsigned long len,
1660 			  unsigned long flags, int pc)
1661 {
1662 	struct ring_buffer_event *entry;
1663 
1664 	*current_rb = ftrace_file->tr->trace_buffer.buffer;
1665 	entry = trace_buffer_lock_reserve(*current_rb,
1666 					 type, len, flags, pc);
1667 	/*
1668 	 * If tracing is off, but we have triggers enabled,
1669 	 * we still need to look at the event data. Use the temp_buffer
1670 	 * to store the trace event for the trigger to use. It's recursion
1671 	 * safe and will not be recorded anywhere.
1672 	 */
1673 	if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1674 		*current_rb = temp_buffer;
1675 		entry = trace_buffer_lock_reserve(*current_rb,
1676 						  type, len, flags, pc);
1677 	}
1678 	return entry;
1679 }
1680 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1681 
1682 struct ring_buffer_event *
1683 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1684 				  int type, unsigned long len,
1685 				  unsigned long flags, int pc)
1686 {
1687 	*current_rb = global_trace.trace_buffer.buffer;
1688 	return trace_buffer_lock_reserve(*current_rb,
1689 					 type, len, flags, pc);
1690 }
1691 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1692 
1693 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1694 					struct ring_buffer_event *event,
1695 					unsigned long flags, int pc)
1696 {
1697 	__trace_buffer_unlock_commit(buffer, event, flags, pc);
1698 }
1699 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1700 
1701 void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1702 				     struct ring_buffer_event *event,
1703 				     unsigned long flags, int pc,
1704 				     struct pt_regs *regs)
1705 {
1706 	__buffer_unlock_commit(buffer, event);
1707 
1708 	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1709 	ftrace_trace_userstack(buffer, flags, pc);
1710 }
1711 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1712 
1713 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1714 					 struct ring_buffer_event *event)
1715 {
1716 	ring_buffer_discard_commit(buffer, event);
1717 }
1718 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1719 
1720 void
1721 trace_function(struct trace_array *tr,
1722 	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
1723 	       int pc)
1724 {
1725 	struct ftrace_event_call *call = &event_function;
1726 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
1727 	struct ring_buffer_event *event;
1728 	struct ftrace_entry *entry;
1729 
1730 	/* If we are reading the ring buffer, don't trace */
1731 	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1732 		return;
1733 
1734 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1735 					  flags, pc);
1736 	if (!event)
1737 		return;
1738 	entry	= ring_buffer_event_data(event);
1739 	entry->ip			= ip;
1740 	entry->parent_ip		= parent_ip;
1741 
1742 	if (!call_filter_check_discard(call, entry, buffer, event))
1743 		__buffer_unlock_commit(buffer, event);
1744 }
1745 
1746 #ifdef CONFIG_STACKTRACE
1747 
1748 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1749 struct ftrace_stack {
1750 	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
1751 };
1752 
1753 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1754 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1755 
1756 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1757 				 unsigned long flags,
1758 				 int skip, int pc, struct pt_regs *regs)
1759 {
1760 	struct ftrace_event_call *call = &event_kernel_stack;
1761 	struct ring_buffer_event *event;
1762 	struct stack_entry *entry;
1763 	struct stack_trace trace;
1764 	int use_stack;
1765 	int size = FTRACE_STACK_ENTRIES;
1766 
1767 	trace.nr_entries	= 0;
1768 	trace.skip		= skip;
1769 
1770 	/*
1771 	 * Since events can happen in NMIs there's no safe way to
1772 	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1773 	 * or NMI comes in, it will just have to use the default
1774 	 * FTRACE_STACK_SIZE.
1775 	 */
1776 	preempt_disable_notrace();
1777 
1778 	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1779 	/*
1780 	 * We don't need any atomic variables, just a barrier.
1781 	 * If an interrupt comes in, we don't care, because it would
1782 	 * have exited and put the counter back to what we want.
1783 	 * We just need a barrier to keep gcc from moving things
1784 	 * around.
1785 	 */
1786 	barrier();
1787 	if (use_stack == 1) {
1788 		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
1789 		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;
1790 
1791 		if (regs)
1792 			save_stack_trace_regs(regs, &trace);
1793 		else
1794 			save_stack_trace(&trace);
1795 
1796 		if (trace.nr_entries > size)
1797 			size = trace.nr_entries;
1798 	} else
1799 		/* From now on, use_stack is a boolean */
1800 		use_stack = 0;
1801 
1802 	size *= sizeof(unsigned long);
1803 
1804 	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1805 					  sizeof(*entry) + size, flags, pc);
1806 	if (!event)
1807 		goto out;
1808 	entry = ring_buffer_event_data(event);
1809 
1810 	memset(&entry->caller, 0, size);
1811 
1812 	if (use_stack)
1813 		memcpy(&entry->caller, trace.entries,
1814 		       trace.nr_entries * sizeof(unsigned long));
1815 	else {
1816 		trace.max_entries	= FTRACE_STACK_ENTRIES;
1817 		trace.entries		= entry->caller;
1818 		if (regs)
1819 			save_stack_trace_regs(regs, &trace);
1820 		else
1821 			save_stack_trace(&trace);
1822 	}
1823 
1824 	entry->size = trace.nr_entries;
1825 
1826 	if (!call_filter_check_discard(call, entry, buffer, event))
1827 		__buffer_unlock_commit(buffer, event);
1828 
1829  out:
1830 	/* Again, don't let gcc optimize things here */
1831 	barrier();
1832 	__this_cpu_dec(ftrace_stack_reserve);
1833 	preempt_enable_notrace();
1834 
1835 }
1836 
1837 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1838 			     int skip, int pc, struct pt_regs *regs)
1839 {
1840 	if (!(trace_flags & TRACE_ITER_STACKTRACE))
1841 		return;
1842 
1843 	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
1844 }
1845 
1846 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1847 			int skip, int pc)
1848 {
1849 	if (!(trace_flags & TRACE_ITER_STACKTRACE))
1850 		return;
1851 
1852 	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1853 }
1854 
1855 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1856 		   int pc)
1857 {
1858 	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1859 }
1860 
1861 /**
1862  * trace_dump_stack - record a stack back trace in the trace buffer
1863  * @skip: Number of functions to skip (helper handlers)
1864  */
1865 void trace_dump_stack(int skip)
1866 {
1867 	unsigned long flags;
1868 
1869 	if (tracing_disabled || tracing_selftest_running)
1870 		return;
1871 
1872 	local_save_flags(flags);
1873 
1874 	/*
1875 	 * Skip 3 more, seems to get us at the caller of
1876 	 * this function.
1877 	 */
1878 	skip += 3;
1879 	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
1880 			     flags, skip, preempt_count(), NULL);
1881 }
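
/*
 * Usage sketch (hypothetical call site): record a kernel backtrace in the
 * trace buffer at a point of interest without stopping the trace.
 */
#if 0
static void example_suspicious_path(void)
{
	trace_dump_stack(0);	/* 0: do not skip any additional callers */
}
#endif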
1882 
1883 static DEFINE_PER_CPU(int, user_stack_count);
1884 
1885 void
1886 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1887 {
1888 	struct ftrace_event_call *call = &event_user_stack;
1889 	struct ring_buffer_event *event;
1890 	struct userstack_entry *entry;
1891 	struct stack_trace trace;
1892 
1893 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1894 		return;
1895 
1896 	/*
1897 	 * NMIs can not handle page faults, even with fixups.
1898 	 * Saving the user stack can (and often does) fault.
1899 	 */
1900 	if (unlikely(in_nmi()))
1901 		return;
1902 
1903 	/*
1904 	 * prevent recursion, since the user stack tracing may
1905 	 * trigger other kernel events.
1906 	 */
1907 	preempt_disable();
1908 	if (__this_cpu_read(user_stack_count))
1909 		goto out;
1910 
1911 	__this_cpu_inc(user_stack_count);
1912 
1913 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1914 					  sizeof(*entry), flags, pc);
1915 	if (!event)
1916 		goto out_drop_count;
1917 	entry	= ring_buffer_event_data(event);
1918 
1919 	entry->tgid		= current->tgid;
1920 	memset(&entry->caller, 0, sizeof(entry->caller));
1921 
1922 	trace.nr_entries	= 0;
1923 	trace.max_entries	= FTRACE_STACK_ENTRIES;
1924 	trace.skip		= 0;
1925 	trace.entries		= entry->caller;
1926 
1927 	save_stack_trace_user(&trace);
1928 	if (!call_filter_check_discard(call, entry, buffer, event))
1929 		__buffer_unlock_commit(buffer, event);
1930 
1931  out_drop_count:
1932 	__this_cpu_dec(user_stack_count);
1933  out:
1934 	preempt_enable();
1935 }
1936 
1937 #ifdef UNUSED
1938 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1939 {
1940 	ftrace_trace_userstack(tr, flags, preempt_count());
1941 }
1942 #endif /* UNUSED */
1943 
1944 #endif /* CONFIG_STACKTRACE */
1945 
1946 /* created for use with alloc_percpu */
1947 struct trace_buffer_struct {
1948 	char buffer[TRACE_BUF_SIZE];
1949 };
1950 
1951 static struct trace_buffer_struct *trace_percpu_buffer;
1952 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1953 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1954 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1955 
1956 /*
1957  * The buffer used depends on the context. There is a per-CPU
1958  * buffer for normal context, softirq context, hard irq context and
1959  * NMI context. This allows for lockless recording.
1960  *
1961  * Note: if the buffers failed to be allocated, this returns NULL.
1962  */
1963 static char *get_trace_buf(void)
1964 {
1965 	struct trace_buffer_struct *percpu_buffer;
1966 
1967 	/*
1968 	 * If we have allocated per cpu buffers, then we do not
1969 	 * need to do any locking.
1970 	 */
1971 	if (in_nmi())
1972 		percpu_buffer = trace_percpu_nmi_buffer;
1973 	else if (in_irq())
1974 		percpu_buffer = trace_percpu_irq_buffer;
1975 	else if (in_softirq())
1976 		percpu_buffer = trace_percpu_sirq_buffer;
1977 	else
1978 		percpu_buffer = trace_percpu_buffer;
1979 
1980 	if (!percpu_buffer)
1981 		return NULL;
1982 
1983 	return this_cpu_ptr(&percpu_buffer->buffer[0]);
1984 }
1985 
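/*
 * Sketch (illustrative, not part of trace.c): the in_nmi()/in_irq()/
 * in_softirq() tests used by get_trace_buf() are the generic way to pick a
 * per-context resource, so nested contexts on one CPU never share (and
 * never need to lock) the same buffer. The index mapping below is a
 * hypothetical example of the same idea.
 */
static int example_context_index(void)
{
	if (in_nmi())
		return 3;	/* NMI */
	if (in_irq())
		return 2;	/* hard interrupt */
	if (in_softirq())
		return 1;	/* soft interrupt (or BHs disabled) */
	return 0;		/* normal process context */
}
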
1986 static int alloc_percpu_trace_buffer(void)
1987 {
1988 	struct trace_buffer_struct *buffers;
1989 	struct trace_buffer_struct *sirq_buffers;
1990 	struct trace_buffer_struct *irq_buffers;
1991 	struct trace_buffer_struct *nmi_buffers;
1992 
1993 	buffers = alloc_percpu(struct trace_buffer_struct);
1994 	if (!buffers)
1995 		goto err_warn;
1996 
1997 	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1998 	if (!sirq_buffers)
1999 		goto err_sirq;
2000 
2001 	irq_buffers = alloc_percpu(struct trace_buffer_struct);
2002 	if (!irq_buffers)
2003 		goto err_irq;
2004 
2005 	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2006 	if (!nmi_buffers)
2007 		goto err_nmi;
2008 
2009 	trace_percpu_buffer = buffers;
2010 	trace_percpu_sirq_buffer = sirq_buffers;
2011 	trace_percpu_irq_buffer = irq_buffers;
2012 	trace_percpu_nmi_buffer = nmi_buffers;
2013 
2014 	return 0;
2015 
2016  err_nmi:
2017 	free_percpu(irq_buffers);
2018  err_irq:
2019 	free_percpu(sirq_buffers);
2020  err_sirq:
2021 	free_percpu(buffers);
2022  err_warn:
2023 	WARN(1, "Could not allocate percpu trace_printk buffer");
2024 	return -ENOMEM;
2025 }
2026 
2027 static int buffers_allocated;
2028 
2029 void trace_printk_init_buffers(void)
2030 {
2031 	if (buffers_allocated)
2032 		return;
2033 
2034 	if (alloc_percpu_trace_buffer())
2035 		return;
2036 
2037 	/* trace_printk() is for debug use only. Don't use it in production. */
2038 
2039 	pr_warning("\n");
2040 	pr_warning("**********************************************************\n");
2041 	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2042 	pr_warning("**                                                      **\n");
2043 	pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
2044 	pr_warning("**                                                      **\n");
2045 	pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
2046 	pr_warning("** unsafe for production use.                           **\n");
2047 	pr_warning("**                                                      **\n");
2048 	pr_warning("** If you see this message and you are not debugging    **\n");
2049 	pr_warning("** the kernel, report this immediately to your vendor!  **\n");
2050 	pr_warning("**                                                      **\n");
2051 	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2052 	pr_warning("**********************************************************\n");
2053 
2054 	/* Expand the buffers to set size */
2055 	tracing_update_buffers();
2056 
2057 	buffers_allocated = 1;
2058 
2059 	/*
2060 	 * trace_printk_init_buffers() can be called by modules.
2061 	 * If that happens, then we need to start cmdline recording
2062 	 * directly here. If the global_trace.buffer is already
2063 	 * allocated here, then this was called by module code.
2064 	 */
2065 	if (global_trace.trace_buffer.buffer)
2066 		tracing_start_cmdline_record();
2067 }
2068 
2069 void trace_printk_start_comm(void)
2070 {
2071 	/* Start tracing comms if trace printk is set */
2072 	if (!buffers_allocated)
2073 		return;
2074 	tracing_start_cmdline_record();
2075 }
2076 
2077 static void trace_printk_start_stop_comm(int enabled)
2078 {
2079 	if (!buffers_allocated)
2080 		return;
2081 
2082 	if (enabled)
2083 		tracing_start_cmdline_record();
2084 	else
2085 		tracing_stop_cmdline_record();
2086 }
2087 
2088 /**
2089  * trace_vbprintk - write a binary message to the tracing buffer
2090  *
2091  */
2092 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2093 {
2094 	struct ftrace_event_call *call = &event_bprint;
2095 	struct ring_buffer_event *event;
2096 	struct ring_buffer *buffer;
2097 	struct trace_array *tr = &global_trace;
2098 	struct bprint_entry *entry;
2099 	unsigned long flags;
2100 	char *tbuffer;
2101 	int len = 0, size, pc;
2102 
2103 	if (unlikely(tracing_selftest_running || tracing_disabled))
2104 		return 0;
2105 
2106 	/* Don't pollute graph traces with trace_vprintk internals */
2107 	pause_graph_tracing();
2108 
2109 	pc = preempt_count();
2110 	preempt_disable_notrace();
2111 
2112 	tbuffer = get_trace_buf();
2113 	if (!tbuffer) {
2114 		len = 0;
2115 		goto out;
2116 	}
2117 
2118 	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2119 
2120 	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2121 		goto out;
2122 
2123 	local_save_flags(flags);
2124 	size = sizeof(*entry) + sizeof(u32) * len;
2125 	buffer = tr->trace_buffer.buffer;
2126 	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2127 					  flags, pc);
2128 	if (!event)
2129 		goto out;
2130 	entry = ring_buffer_event_data(event);
2131 	entry->ip			= ip;
2132 	entry->fmt			= fmt;
2133 
2134 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2135 	if (!call_filter_check_discard(call, entry, buffer, event)) {
2136 		__buffer_unlock_commit(buffer, event);
2137 		ftrace_trace_stack(buffer, flags, 6, pc);
2138 	}
2139 
2140 out:
2141 	preempt_enable_notrace();
2142 	unpause_graph_tracing();
2143 
2144 	return len;
2145 }
2146 EXPORT_SYMBOL_GPL(trace_vbprintk);
2147 
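/*
 * Example (sketch, not part of trace.c): a trace_printk() call with
 * arguments in debug code is what typically ends up here - only the format
 * string pointer and the binary arguments are stored, and the text is
 * formatted when the buffer is read. The function and variable names below
 * are hypothetical.
 */
static void example_debug_latency(int cpu, u64 delta_ns)
{
	trace_printk("cpu %d saw a wakeup latency of %llu ns\n",
		     cpu, delta_ns);
}
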
2148 static int
2149 __trace_array_vprintk(struct ring_buffer *buffer,
2150 		      unsigned long ip, const char *fmt, va_list args)
2151 {
2152 	struct ftrace_event_call *call = &event_print;
2153 	struct ring_buffer_event *event;
2154 	int len = 0, size, pc;
2155 	struct print_entry *entry;
2156 	unsigned long flags;
2157 	char *tbuffer;
2158 
2159 	if (tracing_disabled || tracing_selftest_running)
2160 		return 0;
2161 
2162 	/* Don't pollute graph traces with trace_vprintk internals */
2163 	pause_graph_tracing();
2164 
2165 	pc = preempt_count();
2166 	preempt_disable_notrace();
2167 
2168 
2169 	tbuffer = get_trace_buf();
2170 	if (!tbuffer) {
2171 		len = 0;
2172 		goto out;
2173 	}
2174 
2175 	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2176 
2177 	local_save_flags(flags);
2178 	size = sizeof(*entry) + len + 1;
2179 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2180 					  flags, pc);
2181 	if (!event)
2182 		goto out;
2183 	entry = ring_buffer_event_data(event);
2184 	entry->ip = ip;
2185 
2186 	memcpy(&entry->buf, tbuffer, len + 1);
2187 	if (!call_filter_check_discard(call, entry, buffer, event)) {
2188 		__buffer_unlock_commit(buffer, event);
2189 		ftrace_trace_stack(buffer, flags, 6, pc);
2190 	}
2191  out:
2192 	preempt_enable_notrace();
2193 	unpause_graph_tracing();
2194 
2195 	return len;
2196 }
2197 
2198 int trace_array_vprintk(struct trace_array *tr,
2199 			unsigned long ip, const char *fmt, va_list args)
2200 {
2201 	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2202 }
2203 
2204 int trace_array_printk(struct trace_array *tr,
2205 		       unsigned long ip, const char *fmt, ...)
2206 {
2207 	int ret;
2208 	va_list ap;
2209 
2210 	if (!(trace_flags & TRACE_ITER_PRINTK))
2211 		return 0;
2212 
2213 	va_start(ap, fmt);
2214 	ret = trace_array_vprintk(tr, ip, fmt, ap);
2215 	va_end(ap);
2216 	return ret;
2217 }
2218 
2219 int trace_array_printk_buf(struct ring_buffer *buffer,
2220 			   unsigned long ip, const char *fmt, ...)
2221 {
2222 	int ret;
2223 	va_list ap;
2224 
2225 	if (!(trace_flags & TRACE_ITER_PRINTK))
2226 		return 0;
2227 
2228 	va_start(ap, fmt);
2229 	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2230 	va_end(ap);
2231 	return ret;
2232 }
2233 
2234 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2235 {
2236 	return trace_array_vprintk(&global_trace, ip, fmt, args);
2237 }
2238 EXPORT_SYMBOL_GPL(trace_vprintk);
2239 
2240 static void trace_iterator_increment(struct trace_iterator *iter)
2241 {
2242 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2243 
2244 	iter->idx++;
2245 	if (buf_iter)
2246 		ring_buffer_read(buf_iter, NULL);
2247 }
2248 
2249 static struct trace_entry *
2250 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2251 		unsigned long *lost_events)
2252 {
2253 	struct ring_buffer_event *event;
2254 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2255 
2256 	if (buf_iter)
2257 		event = ring_buffer_iter_peek(buf_iter, ts);
2258 	else
2259 		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2260 					 lost_events);
2261 
2262 	if (event) {
2263 		iter->ent_size = ring_buffer_event_length(event);
2264 		return ring_buffer_event_data(event);
2265 	}
2266 	iter->ent_size = 0;
2267 	return NULL;
2268 }
2269 
2270 static struct trace_entry *
2271 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2272 		  unsigned long *missing_events, u64 *ent_ts)
2273 {
2274 	struct ring_buffer *buffer = iter->trace_buffer->buffer;
2275 	struct trace_entry *ent, *next = NULL;
2276 	unsigned long lost_events = 0, next_lost = 0;
2277 	int cpu_file = iter->cpu_file;
2278 	u64 next_ts = 0, ts;
2279 	int next_cpu = -1;
2280 	int next_size = 0;
2281 	int cpu;
2282 
2283 	/*
2284 	 * If we are in a per_cpu trace file, don't bother iterating over
2285 	 * all CPUs; just peek at that one CPU directly.
2286 	 */
2287 	if (cpu_file > RING_BUFFER_ALL_CPUS) {
2288 		if (ring_buffer_empty_cpu(buffer, cpu_file))
2289 			return NULL;
2290 		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2291 		if (ent_cpu)
2292 			*ent_cpu = cpu_file;
2293 
2294 		return ent;
2295 	}
2296 
2297 	for_each_tracing_cpu(cpu) {
2298 
2299 		if (ring_buffer_empty_cpu(buffer, cpu))
2300 			continue;
2301 
2302 		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2303 
2304 		/*
2305 		 * Pick the entry with the smallest timestamp:
2306 		 */
2307 		if (ent && (!next || ts < next_ts)) {
2308 			next = ent;
2309 			next_cpu = cpu;
2310 			next_ts = ts;
2311 			next_lost = lost_events;
2312 			next_size = iter->ent_size;
2313 		}
2314 	}
2315 
2316 	iter->ent_size = next_size;
2317 
2318 	if (ent_cpu)
2319 		*ent_cpu = next_cpu;
2320 
2321 	if (ent_ts)
2322 		*ent_ts = next_ts;
2323 
2324 	if (missing_events)
2325 		*missing_events = next_lost;
2326 
2327 	return next;
2328 }
2329 
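/*
 * Sketch (illustrative, not part of trace.c): __find_next_entry() above is
 * a k-way merge - each CPU buffer is already ordered by timestamp, so the
 * globally oldest entry is found by peeking at every non-empty buffer and
 * taking the minimum. The same idea over plain arrays, with hypothetical
 * types and names:
 */
struct example_stream {
	const u64	*ts;	/* ascending timestamps */
	int		nr;	/* number of entries */
	int		pos;	/* current read position */
};

/* Return the index of the stream holding the oldest entry, or -1 if none. */
static int example_pick_oldest(struct example_stream *streams, int nr_streams)
{
	u64 best_ts = 0;
	int best = -1;
	int i;

	for (i = 0; i < nr_streams; i++) {
		struct example_stream *s = &streams[i];

		if (s->pos >= s->nr)
			continue;
		if (best < 0 || s->ts[s->pos] < best_ts) {
			best = i;
			best_ts = s->ts[s->pos];
		}
	}
	return best;
}
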
2330 /* Find the next real entry, without updating the iterator itself */
2331 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2332 					  int *ent_cpu, u64 *ent_ts)
2333 {
2334 	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2335 }
2336 
2337 /* Find the next real entry, and increment the iterator to the next entry */
2338 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2339 {
2340 	iter->ent = __find_next_entry(iter, &iter->cpu,
2341 				      &iter->lost_events, &iter->ts);
2342 
2343 	if (iter->ent)
2344 		trace_iterator_increment(iter);
2345 
2346 	return iter->ent ? iter : NULL;
2347 }
2348 
2349 static void trace_consume(struct trace_iterator *iter)
2350 {
2351 	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2352 			    &iter->lost_events);
2353 }
2354 
2355 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2356 {
2357 	struct trace_iterator *iter = m->private;
2358 	int i = (int)*pos;
2359 	void *ent;
2360 
2361 	WARN_ON_ONCE(iter->leftover);
2362 
2363 	(*pos)++;
2364 
2365 	/* can't go backwards */
2366 	if (iter->idx > i)
2367 		return NULL;
2368 
2369 	if (iter->idx < 0)
2370 		ent = trace_find_next_entry_inc(iter);
2371 	else
2372 		ent = iter;
2373 
2374 	while (ent && iter->idx < i)
2375 		ent = trace_find_next_entry_inc(iter);
2376 
2377 	iter->pos = *pos;
2378 
2379 	return ent;
2380 }
2381 
2382 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2383 {
2384 	struct ring_buffer_event *event;
2385 	struct ring_buffer_iter *buf_iter;
2386 	unsigned long entries = 0;
2387 	u64 ts;
2388 
2389 	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2390 
2391 	buf_iter = trace_buffer_iter(iter, cpu);
2392 	if (!buf_iter)
2393 		return;
2394 
2395 	ring_buffer_iter_reset(buf_iter);
2396 
2397 	/*
2398 	 * With the max latency tracers, a reset may never have taken
2399 	 * place on a cpu. This is evident when the timestamp is
2400 	 * before the start of the buffer.
2401 	 */
2402 	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2403 		if (ts >= iter->trace_buffer->time_start)
2404 			break;
2405 		entries++;
2406 		ring_buffer_read(buf_iter, NULL);
2407 	}
2408 
2409 	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2410 }
2411 
2412 /*
2413  * The current tracer is copied to avoid taking a global lock
2414  * all around.
2415  */
2416 static void *s_start(struct seq_file *m, loff_t *pos)
2417 {
2418 	struct trace_iterator *iter = m->private;
2419 	struct trace_array *tr = iter->tr;
2420 	int cpu_file = iter->cpu_file;
2421 	void *p = NULL;
2422 	loff_t l = 0;
2423 	int cpu;
2424 
2425 	/*
2426 	 * Copy the tracer to avoid using a global lock all around.
2427 	 * iter->trace is a copy of current_trace; the pointer to the
2428 	 * name may be used instead of a strcmp(), as iter->trace->name
2429 	 * will point to the same string as current_trace->name.
2430 	 */
2431 	mutex_lock(&trace_types_lock);
2432 	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2433 		*iter->trace = *tr->current_trace;
2434 	mutex_unlock(&trace_types_lock);
2435 
2436 #ifdef CONFIG_TRACER_MAX_TRACE
2437 	if (iter->snapshot && iter->trace->use_max_tr)
2438 		return ERR_PTR(-EBUSY);
2439 #endif
2440 
2441 	if (!iter->snapshot)
2442 		atomic_inc(&trace_record_cmdline_disabled);
2443 
2444 	if (*pos != iter->pos) {
2445 		iter->ent = NULL;
2446 		iter->cpu = 0;
2447 		iter->idx = -1;
2448 
2449 		if (cpu_file == RING_BUFFER_ALL_CPUS) {
2450 			for_each_tracing_cpu(cpu)
2451 				tracing_iter_reset(iter, cpu);
2452 		} else
2453 			tracing_iter_reset(iter, cpu_file);
2454 
2455 		iter->leftover = 0;
2456 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2457 			;
2458 
2459 	} else {
2460 		/*
2461 		 * If we overflowed the seq_file before, then we want
2462 		 * to just reuse the trace_seq buffer again.
2463 		 */
2464 		if (iter->leftover)
2465 			p = iter;
2466 		else {
2467 			l = *pos - 1;
2468 			p = s_next(m, p, &l);
2469 		}
2470 	}
2471 
2472 	trace_event_read_lock();
2473 	trace_access_lock(cpu_file);
2474 	return p;
2475 }
2476 
2477 static void s_stop(struct seq_file *m, void *p)
2478 {
2479 	struct trace_iterator *iter = m->private;
2480 
2481 #ifdef CONFIG_TRACER_MAX_TRACE
2482 	if (iter->snapshot && iter->trace->use_max_tr)
2483 		return;
2484 #endif
2485 
2486 	if (!iter->snapshot)
2487 		atomic_dec(&trace_record_cmdline_disabled);
2488 
2489 	trace_access_unlock(iter->cpu_file);
2490 	trace_event_read_unlock();
2491 }
2492 
2493 static void
2494 get_total_entries(struct trace_buffer *buf,
2495 		  unsigned long *total, unsigned long *entries)
2496 {
2497 	unsigned long count;
2498 	int cpu;
2499 
2500 	*total = 0;
2501 	*entries = 0;
2502 
2503 	for_each_tracing_cpu(cpu) {
2504 		count = ring_buffer_entries_cpu(buf->buffer, cpu);
2505 		/*
2506 		 * If this buffer has skipped entries, then we hold all
2507 		 * entries for the trace and we need to ignore the
2508 		 * ones before the time stamp.
2509 		 */
2510 		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2511 			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2512 			/* total is the same as the entries */
2513 			*total += count;
2514 		} else
2515 			*total += count +
2516 				ring_buffer_overrun_cpu(buf->buffer, cpu);
2517 		*entries += count;
2518 	}
2519 }
2520 
2521 static void print_lat_help_header(struct seq_file *m)
2522 {
2523 	seq_puts(m, "#                  _------=> CPU#            \n"
2524 		    "#                 / _-----=> irqs-off        \n"
2525 		    "#                | / _----=> need-resched    \n"
2526 		    "#                || / _---=> hardirq/softirq \n"
2527 		    "#                ||| / _--=> preempt-depth   \n"
2528 		    "#                |||| /     delay            \n"
2529 		    "#  cmd     pid   ||||| time  |   caller      \n"
2530 		    "#     \\   /      |||||  \\    |   /         \n");
2531 }
2532 
2533 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2534 {
2535 	unsigned long total;
2536 	unsigned long entries;
2537 
2538 	get_total_entries(buf, &total, &entries);
2539 	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
2540 		   entries, total, num_online_cpus());
2541 	seq_puts(m, "#\n");
2542 }
2543 
2544 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2545 {
2546 	print_event_info(buf, m);
2547 	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
2548 		    "#              | |       |          |         |\n");
2549 }
2550 
2551 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2552 {
2553 	print_event_info(buf, m);
2554 	seq_puts(m, "#                              _-----=> irqs-off\n"
2555 		    "#                             / _----=> need-resched\n"
2556 		    "#                            | / _---=> hardirq/softirq\n"
2557 		    "#                            || / _--=> preempt-depth\n"
2558 		    "#                            ||| /     delay\n"
2559 		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
2560 		    "#              | |       |   ||||       |         |\n");
2561 }
2562 
2563 void
2564 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2565 {
2566 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2567 	struct trace_buffer *buf = iter->trace_buffer;
2568 	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2569 	struct tracer *type = iter->trace;
2570 	unsigned long entries;
2571 	unsigned long total;
2572 	const char *name = "preemption";
2573 
2574 	name = type->name;
2575 
2576 	get_total_entries(buf, &total, &entries);
2577 
2578 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2579 		   name, UTS_RELEASE);
2580 	seq_puts(m, "# -----------------------------------"
2581 		 "---------------------------------\n");
2582 	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2583 		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2584 		   nsecs_to_usecs(data->saved_latency),
2585 		   entries,
2586 		   total,
2587 		   buf->cpu,
2588 #if defined(CONFIG_PREEMPT_NONE)
2589 		   "server",
2590 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2591 		   "desktop",
2592 #elif defined(CONFIG_PREEMPT)
2593 		   "preempt",
2594 #else
2595 		   "unknown",
2596 #endif
2597 		   /* These are reserved for later use */
2598 		   0, 0, 0, 0);
2599 #ifdef CONFIG_SMP
2600 	seq_printf(m, " #P:%d)\n", num_online_cpus());
2601 #else
2602 	seq_puts(m, ")\n");
2603 #endif
2604 	seq_puts(m, "#    -----------------\n");
2605 	seq_printf(m, "#    | task: %.16s-%d "
2606 		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2607 		   data->comm, data->pid,
2608 		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2609 		   data->policy, data->rt_priority);
2610 	seq_puts(m, "#    -----------------\n");
2611 
2612 	if (data->critical_start) {
2613 		seq_puts(m, "#  => started at: ");
2614 		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2615 		trace_print_seq(m, &iter->seq);
2616 		seq_puts(m, "\n#  => ended at:   ");
2617 		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2618 		trace_print_seq(m, &iter->seq);
2619 		seq_puts(m, "\n#\n");
2620 	}
2621 
2622 	seq_puts(m, "#\n");
2623 }
2624 
2625 static void test_cpu_buff_start(struct trace_iterator *iter)
2626 {
2627 	struct trace_seq *s = &iter->seq;
2628 
2629 	if (!(trace_flags & TRACE_ITER_ANNOTATE))
2630 		return;
2631 
2632 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2633 		return;
2634 
2635 	if (cpumask_test_cpu(iter->cpu, iter->started))
2636 		return;
2637 
2638 	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2639 		return;
2640 
2641 	cpumask_set_cpu(iter->cpu, iter->started);
2642 
2643 	/* Don't print started cpu buffer for the first entry of the trace */
2644 	if (iter->idx > 1)
2645 		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2646 				iter->cpu);
2647 }
2648 
2649 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2650 {
2651 	struct trace_seq *s = &iter->seq;
2652 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2653 	struct trace_entry *entry;
2654 	struct trace_event *event;
2655 
2656 	entry = iter->ent;
2657 
2658 	test_cpu_buff_start(iter);
2659 
2660 	event = ftrace_find_event(entry->type);
2661 
2662 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2663 		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2664 			trace_print_lat_context(iter);
2665 		else
2666 			trace_print_context(iter);
2667 	}
2668 
2669 	if (trace_seq_has_overflowed(s))
2670 		return TRACE_TYPE_PARTIAL_LINE;
2671 
2672 	if (event)
2673 		return event->funcs->trace(iter, sym_flags, event);
2674 
2675 	trace_seq_printf(s, "Unknown type %d\n", entry->type);
2676 
2677 	return trace_handle_return(s);
2678 }
2679 
2680 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2681 {
2682 	struct trace_seq *s = &iter->seq;
2683 	struct trace_entry *entry;
2684 	struct trace_event *event;
2685 
2686 	entry = iter->ent;
2687 
2688 	if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2689 		trace_seq_printf(s, "%d %d %llu ",
2690 				 entry->pid, iter->cpu, iter->ts);
2691 
2692 	if (trace_seq_has_overflowed(s))
2693 		return TRACE_TYPE_PARTIAL_LINE;
2694 
2695 	event = ftrace_find_event(entry->type);
2696 	if (event)
2697 		return event->funcs->raw(iter, 0, event);
2698 
2699 	trace_seq_printf(s, "%d ?\n", entry->type);
2700 
2701 	return trace_handle_return(s);
2702 }
2703 
2704 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2705 {
2706 	struct trace_seq *s = &iter->seq;
2707 	unsigned char newline = '\n';
2708 	struct trace_entry *entry;
2709 	struct trace_event *event;
2710 
2711 	entry = iter->ent;
2712 
2713 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2714 		SEQ_PUT_HEX_FIELD(s, entry->pid);
2715 		SEQ_PUT_HEX_FIELD(s, iter->cpu);
2716 		SEQ_PUT_HEX_FIELD(s, iter->ts);
2717 		if (trace_seq_has_overflowed(s))
2718 			return TRACE_TYPE_PARTIAL_LINE;
2719 	}
2720 
2721 	event = ftrace_find_event(entry->type);
2722 	if (event) {
2723 		enum print_line_t ret = event->funcs->hex(iter, 0, event);
2724 		if (ret != TRACE_TYPE_HANDLED)
2725 			return ret;
2726 	}
2727 
2728 	SEQ_PUT_FIELD(s, newline);
2729 
2730 	return trace_handle_return(s);
2731 }
2732 
2733 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2734 {
2735 	struct trace_seq *s = &iter->seq;
2736 	struct trace_entry *entry;
2737 	struct trace_event *event;
2738 
2739 	entry = iter->ent;
2740 
2741 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2742 		SEQ_PUT_FIELD(s, entry->pid);
2743 		SEQ_PUT_FIELD(s, iter->cpu);
2744 		SEQ_PUT_FIELD(s, iter->ts);
2745 		if (trace_seq_has_overflowed(s))
2746 			return TRACE_TYPE_PARTIAL_LINE;
2747 	}
2748 
2749 	event = ftrace_find_event(entry->type);
2750 	return event ? event->funcs->binary(iter, 0, event) :
2751 		TRACE_TYPE_HANDLED;
2752 }
2753 
2754 int trace_empty(struct trace_iterator *iter)
2755 {
2756 	struct ring_buffer_iter *buf_iter;
2757 	int cpu;
2758 
2759 	/* If we are looking at one CPU buffer, only check that one */
2760 	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2761 		cpu = iter->cpu_file;
2762 		buf_iter = trace_buffer_iter(iter, cpu);
2763 		if (buf_iter) {
2764 			if (!ring_buffer_iter_empty(buf_iter))
2765 				return 0;
2766 		} else {
2767 			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2768 				return 0;
2769 		}
2770 		return 1;
2771 	}
2772 
2773 	for_each_tracing_cpu(cpu) {
2774 		buf_iter = trace_buffer_iter(iter, cpu);
2775 		if (buf_iter) {
2776 			if (!ring_buffer_iter_empty(buf_iter))
2777 				return 0;
2778 		} else {
2779 			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2780 				return 0;
2781 		}
2782 	}
2783 
2784 	return 1;
2785 }
2786 
2787 /*  Called with trace_event_read_lock() held. */
2788 enum print_line_t print_trace_line(struct trace_iterator *iter)
2789 {
2790 	enum print_line_t ret;
2791 
2792 	if (iter->lost_events) {
2793 		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2794 				 iter->cpu, iter->lost_events);
2795 		if (trace_seq_has_overflowed(&iter->seq))
2796 			return TRACE_TYPE_PARTIAL_LINE;
2797 	}
2798 
2799 	if (iter->trace && iter->trace->print_line) {
2800 		ret = iter->trace->print_line(iter);
2801 		if (ret != TRACE_TYPE_UNHANDLED)
2802 			return ret;
2803 	}
2804 
2805 	if (iter->ent->type == TRACE_BPUTS &&
2806 			trace_flags & TRACE_ITER_PRINTK &&
2807 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2808 		return trace_print_bputs_msg_only(iter);
2809 
2810 	if (iter->ent->type == TRACE_BPRINT &&
2811 			trace_flags & TRACE_ITER_PRINTK &&
2812 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2813 		return trace_print_bprintk_msg_only(iter);
2814 
2815 	if (iter->ent->type == TRACE_PRINT &&
2816 			trace_flags & TRACE_ITER_PRINTK &&
2817 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2818 		return trace_print_printk_msg_only(iter);
2819 
2820 	if (trace_flags & TRACE_ITER_BIN)
2821 		return print_bin_fmt(iter);
2822 
2823 	if (trace_flags & TRACE_ITER_HEX)
2824 		return print_hex_fmt(iter);
2825 
2826 	if (trace_flags & TRACE_ITER_RAW)
2827 		return print_raw_fmt(iter);
2828 
2829 	return print_trace_fmt(iter);
2830 }
2831 
2832 void trace_latency_header(struct seq_file *m)
2833 {
2834 	struct trace_iterator *iter = m->private;
2835 
2836 	/* print nothing if the buffers are empty */
2837 	if (trace_empty(iter))
2838 		return;
2839 
2840 	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2841 		print_trace_header(m, iter);
2842 
2843 	if (!(trace_flags & TRACE_ITER_VERBOSE))
2844 		print_lat_help_header(m);
2845 }
2846 
2847 void trace_default_header(struct seq_file *m)
2848 {
2849 	struct trace_iterator *iter = m->private;
2850 
2851 	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2852 		return;
2853 
2854 	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2855 		/* print nothing if the buffers are empty */
2856 		if (trace_empty(iter))
2857 			return;
2858 		print_trace_header(m, iter);
2859 		if (!(trace_flags & TRACE_ITER_VERBOSE))
2860 			print_lat_help_header(m);
2861 	} else {
2862 		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2863 			if (trace_flags & TRACE_ITER_IRQ_INFO)
2864 				print_func_help_header_irq(iter->trace_buffer, m);
2865 			else
2866 				print_func_help_header(iter->trace_buffer, m);
2867 		}
2868 	}
2869 }
2870 
2871 static void test_ftrace_alive(struct seq_file *m)
2872 {
2873 	if (!ftrace_is_dead())
2874 		return;
2875 	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2876 		    "#          MAY BE MISSING FUNCTION EVENTS\n");
2877 }
2878 
2879 #ifdef CONFIG_TRACER_MAX_TRACE
2880 static void show_snapshot_main_help(struct seq_file *m)
2881 {
2882 	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2883 		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2884 		    "#                      Takes a snapshot of the main buffer.\n"
2885 		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2886 		    "#                      (Doesn't have to be '2'; works with any number that\n"
2887 		    "#                       is not a '0' or '1')\n");
2888 }
2889 
2890 static void show_snapshot_percpu_help(struct seq_file *m)
2891 {
2892 	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2893 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2894 	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2895 		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
2896 #else
2897 	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2898 		    "#                     Must use main snapshot file to allocate.\n");
2899 #endif
2900 	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2901 		    "#                      (Doesn't have to be '2'; works with any number that\n"
2902 		    "#                       is not a '0' or '1')\n");
2903 }
2904 
2905 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2906 {
2907 	if (iter->tr->allocated_snapshot)
2908 		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2909 	else
2910 		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2911 
2912 	seq_puts(m, "# Snapshot commands:\n");
2913 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2914 		show_snapshot_main_help(m);
2915 	else
2916 		show_snapshot_percpu_help(m);
2917 }
2918 #else
2919 /* Should never be called */
2920 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2921 #endif
2922 
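/*
 * Example (sketch, not part of trace.c): besides writing to the snapshot
 * file described above, kernel code can trigger the same capture
 * programmatically with tracing_snapshot(); tracing_snapshot_alloc() also
 * allocates the spare buffer first. This assumes CONFIG_TRACER_SNAPSHOT is
 * enabled; the condition and function name below are hypothetical.
 */
static void example_snapshot_on_error(int err)
{
	if (err)
		tracing_snapshot();
}
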
2923 static int s_show(struct seq_file *m, void *v)
2924 {
2925 	struct trace_iterator *iter = v;
2926 	int ret;
2927 
2928 	if (iter->ent == NULL) {
2929 		if (iter->tr) {
2930 			seq_printf(m, "# tracer: %s\n", iter->trace->name);
2931 			seq_puts(m, "#\n");
2932 			test_ftrace_alive(m);
2933 		}
2934 		if (iter->snapshot && trace_empty(iter))
2935 			print_snapshot_help(m, iter);
2936 		else if (iter->trace && iter->trace->print_header)
2937 			iter->trace->print_header(m);
2938 		else
2939 			trace_default_header(m);
2940 
2941 	} else if (iter->leftover) {
2942 		/*
2943 		 * If we filled the seq_file buffer earlier, we
2944 		 * want to just show it now.
2945 		 */
2946 		ret = trace_print_seq(m, &iter->seq);
2947 
2948 		/* ret should this time be zero, but you never know */
2949 		iter->leftover = ret;
2950 
2951 	} else {
2952 		print_trace_line(iter);
2953 		ret = trace_print_seq(m, &iter->seq);
2954 		/*
2955 		 * If we overflow the seq_file buffer, then it will
2956 		 * ask us for this data again at start up.
2957 		 * Use that instead.
2958 		 *  ret is 0 if seq_file write succeeded.
2959 		 *        -1 otherwise.
2960 		 */
2961 		iter->leftover = ret;
2962 	}
2963 
2964 	return 0;
2965 }
2966 
2967 /*
2968  * Should be used after trace_array_get(); trace_types_lock
2969  * ensures that i_cdev was already initialized.
2970  */
2971 static inline int tracing_get_cpu(struct inode *inode)
2972 {
2973 	if (inode->i_cdev) /* See trace_create_cpu_file() */
2974 		return (long)inode->i_cdev - 1;
2975 	return RING_BUFFER_ALL_CPUS;
2976 }
2977 
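/*
 * Sketch of the matching encode step (illustrative; the real helper,
 * trace_create_cpu_file(), lives later in this file): storing cpu + 1 in
 * i_cdev lets a NULL i_cdev (i.e. 0) stand for RING_BUFFER_ALL_CPUS, which
 * is exactly what tracing_get_cpu() above decodes. The helper name below
 * is hypothetical.
 */
static inline void example_encode_cpu(struct inode *inode, long cpu)
{
	inode->i_cdev = (void *)(cpu + 1);	/* see tracing_get_cpu() */
}
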
2978 static const struct seq_operations tracer_seq_ops = {
2979 	.start		= s_start,
2980 	.next		= s_next,
2981 	.stop		= s_stop,
2982 	.show		= s_show,
2983 };
2984 
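/*
 * seq_file protocol sketch (illustrative, not part of trace.c): the
 * s_start/s_next/s_stop/s_show callbacks above follow the standard
 * seq_operations contract - start() positions a cursor for *pos, next()
 * advances it, show() prints one record, and stop() undoes whatever
 * start() set up. A minimal version over a plain array, with hypothetical
 * names:
 */
static int example_values[] = { 10, 20, 30 };

static void *example_seq_start(struct seq_file *m, loff_t *pos)
{
	if (*pos >= ARRAY_SIZE(example_values))
		return NULL;
	return &example_values[*pos];
}

static void *example_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(example_values))
		return NULL;
	return &example_values[*pos];
}

static void example_seq_stop(struct seq_file *m, void *v)
{
	/* nothing to release for a static array */
}

static int example_seq_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", *(int *)v);
	return 0;
}

static const struct seq_operations example_seq_ops = {
	.start	= example_seq_start,
	.next	= example_seq_next,
	.stop	= example_seq_stop,
	.show	= example_seq_show,
};
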
2985 static struct trace_iterator *
2986 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
2987 {
2988 	struct trace_array *tr = inode->i_private;
2989 	struct trace_iterator *iter;
2990 	int cpu;
2991 
2992 	if (tracing_disabled)
2993 		return ERR_PTR(-ENODEV);
2994 
2995 	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2996 	if (!iter)
2997 		return ERR_PTR(-ENOMEM);
2998 
2999 	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
3000 				    GFP_KERNEL);
3001 	if (!iter->buffer_iter)
3002 		goto release;
3003 
3004 	/*
3005 	 * We make a copy of the current tracer to avoid concurrent
3006 	 * changes to it while we are reading.
3007 	 */
3008 	mutex_lock(&trace_types_lock);
3009 	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3010 	if (!iter->trace)
3011 		goto fail;
3012 
3013 	*iter->trace = *tr->current_trace;
3014 
3015 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3016 		goto fail;
3017 
3018 	iter->tr = tr;
3019 
3020 #ifdef CONFIG_TRACER_MAX_TRACE
3021 	/* Currently only the top directory has a snapshot */
3022 	if (tr->current_trace->print_max || snapshot)
3023 		iter->trace_buffer = &tr->max_buffer;
3024 	else
3025 #endif
3026 		iter->trace_buffer = &tr->trace_buffer;
3027 	iter->snapshot = snapshot;
3028 	iter->pos = -1;
3029 	iter->cpu_file = tracing_get_cpu(inode);
3030 	mutex_init(&iter->mutex);
3031 
3032 	/* Notify the tracer early, before we stop tracing. */
3033 	if (iter->trace && iter->trace->open)
3034 		iter->trace->open(iter);
3035 
3036 	/* Annotate start of buffers if we had overruns */
3037 	if (ring_buffer_overruns(iter->trace_buffer->buffer))
3038 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
3039 
3040 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
3041 	if (trace_clocks[tr->clock_id].in_ns)
3042 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3043 
3044 	/* stop the trace while dumping if we are not opening "snapshot" */
3045 	if (!iter->snapshot)
3046 		tracing_stop_tr(tr);
3047 
3048 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3049 		for_each_tracing_cpu(cpu) {
3050 			iter->buffer_iter[cpu] =
3051 				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3052 		}
3053 		ring_buffer_read_prepare_sync();
3054 		for_each_tracing_cpu(cpu) {
3055 			ring_buffer_read_start(iter->buffer_iter[cpu]);
3056 			tracing_iter_reset(iter, cpu);
3057 		}
3058 	} else {
3059 		cpu = iter->cpu_file;
3060 		iter->buffer_iter[cpu] =
3061 			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3062 		ring_buffer_read_prepare_sync();
3063 		ring_buffer_read_start(iter->buffer_iter[cpu]);
3064 		tracing_iter_reset(iter, cpu);
3065 	}
3066 
3067 	mutex_unlock(&trace_types_lock);
3068 
3069 	return iter;
3070 
3071  fail:
3072 	mutex_unlock(&trace_types_lock);
3073 	kfree(iter->trace);
3074 	kfree(iter->buffer_iter);
3075 release:
3076 	seq_release_private(inode, file);
3077 	return ERR_PTR(-ENOMEM);
3078 }
3079 
3080 int tracing_open_generic(struct inode *inode, struct file *filp)
3081 {
3082 	if (tracing_disabled)
3083 		return -ENODEV;
3084 
3085 	filp->private_data = inode->i_private;
3086 	return 0;
3087 }
3088 
3089 bool tracing_is_disabled(void)
3090 {
3091 	return tracing_disabled ? true : false;
3092 }
3093 
3094 /*
3095  * Open and update trace_array ref count.
3096  * Must have the current trace_array passed to it.
3097  */
3098 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3099 {
3100 	struct trace_array *tr = inode->i_private;
3101 
3102 	if (tracing_disabled)
3103 		return -ENODEV;
3104 
3105 	if (trace_array_get(tr) < 0)
3106 		return -ENODEV;
3107 
3108 	filp->private_data = inode->i_private;
3109 
3110 	return 0;
3111 }
3112 
3113 static int tracing_release(struct inode *inode, struct file *file)
3114 {
3115 	struct trace_array *tr = inode->i_private;
3116 	struct seq_file *m = file->private_data;
3117 	struct trace_iterator *iter;
3118 	int cpu;
3119 
3120 	if (!(file->f_mode & FMODE_READ)) {
3121 		trace_array_put(tr);
3122 		return 0;
3123 	}
3124 
3125 	/* Writes do not use seq_file */
3126 	iter = m->private;
3127 	mutex_lock(&trace_types_lock);
3128 
3129 	for_each_tracing_cpu(cpu) {
3130 		if (iter->buffer_iter[cpu])
3131 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
3132 	}
3133 
3134 	if (iter->trace && iter->trace->close)
3135 		iter->trace->close(iter);
3136 
3137 	if (!iter->snapshot)
3138 		/* reenable tracing if it was previously enabled */
3139 		tracing_start_tr(tr);
3140 
3141 	__trace_array_put(tr);
3142 
3143 	mutex_unlock(&trace_types_lock);
3144 
3145 	mutex_destroy(&iter->mutex);
3146 	free_cpumask_var(iter->started);
3147 	kfree(iter->trace);
3148 	kfree(iter->buffer_iter);
3149 	seq_release_private(inode, file);
3150 
3151 	return 0;
3152 }
3153 
3154 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3155 {
3156 	struct trace_array *tr = inode->i_private;
3157 
3158 	trace_array_put(tr);
3159 	return 0;
3160 }
3161 
3162 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3163 {
3164 	struct trace_array *tr = inode->i_private;
3165 
3166 	trace_array_put(tr);
3167 
3168 	return single_release(inode, file);
3169 }
3170 
3171 static int tracing_open(struct inode *inode, struct file *file)
3172 {
3173 	struct trace_array *tr = inode->i_private;
3174 	struct trace_iterator *iter;
3175 	int ret = 0;
3176 
3177 	if (trace_array_get(tr) < 0)
3178 		return -ENODEV;
3179 
3180 	/* If this file was open for write, then erase contents */
3181 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3182 		int cpu = tracing_get_cpu(inode);
3183 
3184 		if (cpu == RING_BUFFER_ALL_CPUS)
3185 			tracing_reset_online_cpus(&tr->trace_buffer);
3186 		else
3187 			tracing_reset(&tr->trace_buffer, cpu);
3188 	}
3189 
3190 	if (file->f_mode & FMODE_READ) {
3191 		iter = __tracing_open(inode, file, false);
3192 		if (IS_ERR(iter))
3193 			ret = PTR_ERR(iter);
3194 		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3195 			iter->iter_flags |= TRACE_FILE_LAT_FMT;
3196 	}
3197 
3198 	if (ret < 0)
3199 		trace_array_put(tr);
3200 
3201 	return ret;
3202 }
3203 
3204 /*
3205  * Some tracers are not suitable for instance buffers.
3206  * A tracer is always available for the global (top level) array,
3207  * or for an instance if it explicitly states that it is.
3208  */
3209 static bool
3210 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3211 {
3212 	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3213 }
3214 
3215 /* Find the next tracer that this trace array may use */
3216 static struct tracer *
3217 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3218 {
3219 	while (t && !trace_ok_for_array(t, tr))
3220 		t = t->next;
3221 
3222 	return t;
3223 }
3224 
3225 static void *
3226 t_next(struct seq_file *m, void *v, loff_t *pos)
3227 {
3228 	struct trace_array *tr = m->private;
3229 	struct tracer *t = v;
3230 
3231 	(*pos)++;
3232 
3233 	if (t)
3234 		t = get_tracer_for_array(tr, t->next);
3235 
3236 	return t;
3237 }
3238 
3239 static void *t_start(struct seq_file *m, loff_t *pos)
3240 {
3241 	struct trace_array *tr = m->private;
3242 	struct tracer *t;
3243 	loff_t l = 0;
3244 
3245 	mutex_lock(&trace_types_lock);
3246 
3247 	t = get_tracer_for_array(tr, trace_types);
3248 	for (; t && l < *pos; t = t_next(m, t, &l))
3249 			;
3250 
3251 	return t;
3252 }
3253 
3254 static void t_stop(struct seq_file *m, void *p)
3255 {
3256 	mutex_unlock(&trace_types_lock);
3257 }
3258 
3259 static int t_show(struct seq_file *m, void *v)
3260 {
3261 	struct tracer *t = v;
3262 
3263 	if (!t)
3264 		return 0;
3265 
3266 	seq_puts(m, t->name);
3267 	if (t->next)
3268 		seq_putc(m, ' ');
3269 	else
3270 		seq_putc(m, '\n');
3271 
3272 	return 0;
3273 }
3274 
3275 static const struct seq_operations show_traces_seq_ops = {
3276 	.start		= t_start,
3277 	.next		= t_next,
3278 	.stop		= t_stop,
3279 	.show		= t_show,
3280 };
3281 
3282 static int show_traces_open(struct inode *inode, struct file *file)
3283 {
3284 	struct trace_array *tr = inode->i_private;
3285 	struct seq_file *m;
3286 	int ret;
3287 
3288 	if (tracing_disabled)
3289 		return -ENODEV;
3290 
3291 	ret = seq_open(file, &show_traces_seq_ops);
3292 	if (ret)
3293 		return ret;
3294 
3295 	m = file->private_data;
3296 	m->private = tr;
3297 
3298 	return 0;
3299 }
3300 
3301 static ssize_t
3302 tracing_write_stub(struct file *filp, const char __user *ubuf,
3303 		   size_t count, loff_t *ppos)
3304 {
3305 	return count;
3306 }
3307 
3308 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3309 {
3310 	int ret;
3311 
3312 	if (file->f_mode & FMODE_READ)
3313 		ret = seq_lseek(file, offset, whence);
3314 	else
3315 		file->f_pos = ret = 0;
3316 
3317 	return ret;
3318 }
3319 
3320 static const struct file_operations tracing_fops = {
3321 	.open		= tracing_open,
3322 	.read		= seq_read,
3323 	.write		= tracing_write_stub,
3324 	.llseek		= tracing_lseek,
3325 	.release	= tracing_release,
3326 };
3327 
3328 static const struct file_operations show_traces_fops = {
3329 	.open		= show_traces_open,
3330 	.read		= seq_read,
3331 	.release	= seq_release,
3332 	.llseek		= seq_lseek,
3333 };
3334 
3335 /*
3336  * The tracer itself will not take this lock, but we still want
3337  * to provide a consistent cpumask to user space:
3338  */
3339 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3340 
3341 /*
3342  * Temporary storage for the character representation of the
3343  * CPU bitmask (and one more byte for the newline):
3344  */
3345 static char mask_str[NR_CPUS + 1];
3346 
3347 static ssize_t
3348 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3349 		     size_t count, loff_t *ppos)
3350 {
3351 	struct trace_array *tr = file_inode(filp)->i_private;
3352 	int len;
3353 
3354 	mutex_lock(&tracing_cpumask_update_lock);
3355 
3356 	len = snprintf(mask_str, count, "%*pb\n",
3357 		       cpumask_pr_args(tr->tracing_cpumask));
3358 	if (len >= count) {
3359 		count = -EINVAL;
3360 		goto out_err;
3361 	}
3362 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3363 
3364 out_err:
3365 	mutex_unlock(&tracing_cpumask_update_lock);
3366 
3367 	return count;
3368 }
3369 
3370 static ssize_t
3371 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3372 		      size_t count, loff_t *ppos)
3373 {
3374 	struct trace_array *tr = file_inode(filp)->i_private;
3375 	cpumask_var_t tracing_cpumask_new;
3376 	int err, cpu;
3377 
3378 	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3379 		return -ENOMEM;
3380 
3381 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3382 	if (err)
3383 		goto err_unlock;
3384 
3385 	mutex_lock(&tracing_cpumask_update_lock);
3386 
3387 	local_irq_disable();
3388 	arch_spin_lock(&tr->max_lock);
3389 	for_each_tracing_cpu(cpu) {
3390 		/*
3391 		 * Increase/decrease the disabled counter if we are
3392 		 * about to flip a bit in the cpumask:
3393 		 */
3394 		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3395 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3396 			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3397 			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3398 		}
3399 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3400 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3401 			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3402 			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3403 		}
3404 	}
3405 	arch_spin_unlock(&tr->max_lock);
3406 	local_irq_enable();
3407 
3408 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3409 
3410 	mutex_unlock(&tracing_cpumask_update_lock);
3411 	free_cpumask_var(tracing_cpumask_new);
3412 
3413 	return count;
3414 
3415 err_unlock:
3416 	free_cpumask_var(tracing_cpumask_new);
3417 
3418 	return err;
3419 }
3420 
3421 static const struct file_operations tracing_cpumask_fops = {
3422 	.open		= tracing_open_generic_tr,
3423 	.read		= tracing_cpumask_read,
3424 	.write		= tracing_cpumask_write,
3425 	.release	= tracing_release_generic_tr,
3426 	.llseek		= generic_file_llseek,
3427 };
3428 
3429 static int tracing_trace_options_show(struct seq_file *m, void *v)
3430 {
3431 	struct tracer_opt *trace_opts;
3432 	struct trace_array *tr = m->private;
3433 	u32 tracer_flags;
3434 	int i;
3435 
3436 	mutex_lock(&trace_types_lock);
3437 	tracer_flags = tr->current_trace->flags->val;
3438 	trace_opts = tr->current_trace->flags->opts;
3439 
3440 	for (i = 0; trace_options[i]; i++) {
3441 		if (trace_flags & (1 << i))
3442 			seq_printf(m, "%s\n", trace_options[i]);
3443 		else
3444 			seq_printf(m, "no%s\n", trace_options[i]);
3445 	}
3446 
3447 	for (i = 0; trace_opts[i].name; i++) {
3448 		if (tracer_flags & trace_opts[i].bit)
3449 			seq_printf(m, "%s\n", trace_opts[i].name);
3450 		else
3451 			seq_printf(m, "no%s\n", trace_opts[i].name);
3452 	}
3453 	mutex_unlock(&trace_types_lock);
3454 
3455 	return 0;
3456 }
3457 
3458 static int __set_tracer_option(struct trace_array *tr,
3459 			       struct tracer_flags *tracer_flags,
3460 			       struct tracer_opt *opts, int neg)
3461 {
3462 	struct tracer *trace = tr->current_trace;
3463 	int ret;
3464 
3465 	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3466 	if (ret)
3467 		return ret;
3468 
3469 	if (neg)
3470 		tracer_flags->val &= ~opts->bit;
3471 	else
3472 		tracer_flags->val |= opts->bit;
3473 	return 0;
3474 }
3475 
3476 /* Try to assign a tracer specific option */
3477 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3478 {
3479 	struct tracer *trace = tr->current_trace;
3480 	struct tracer_flags *tracer_flags = trace->flags;
3481 	struct tracer_opt *opts = NULL;
3482 	int i;
3483 
3484 	for (i = 0; tracer_flags->opts[i].name; i++) {
3485 		opts = &tracer_flags->opts[i];
3486 
3487 		if (strcmp(cmp, opts->name) == 0)
3488 			return __set_tracer_option(tr, trace->flags, opts, neg);
3489 	}
3490 
3491 	return -EINVAL;
3492 }
3493 
3494 /* Some tracers require overwrite to stay enabled */
3495 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3496 {
3497 	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3498 		return -1;
3499 
3500 	return 0;
3501 }
3502 
3503 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3504 {
3505 	/* do nothing if flag is already set */
3506 	if (!!(trace_flags & mask) == !!enabled)
3507 		return 0;
3508 
3509 	/* Give the tracer a chance to approve the change */
3510 	if (tr->current_trace->flag_changed)
3511 		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3512 			return -EINVAL;
3513 
3514 	if (enabled)
3515 		trace_flags |= mask;
3516 	else
3517 		trace_flags &= ~mask;
3518 
3519 	if (mask == TRACE_ITER_RECORD_CMD)
3520 		trace_event_enable_cmd_record(enabled);
3521 
3522 	if (mask == TRACE_ITER_OVERWRITE) {
3523 		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3524 #ifdef CONFIG_TRACER_MAX_TRACE
3525 		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3526 #endif
3527 	}
3528 
3529 	if (mask == TRACE_ITER_PRINTK)
3530 		trace_printk_start_stop_comm(enabled);
3531 
3532 	return 0;
3533 }
3534 
3535 static int trace_set_options(struct trace_array *tr, char *option)
3536 {
3537 	char *cmp;
3538 	int neg = 0;
3539 	int ret = -ENODEV;
3540 	int i;
3541 
3542 	cmp = strstrip(option);
3543 
3544 	if (strncmp(cmp, "no", 2) == 0) {
3545 		neg = 1;
3546 		cmp += 2;
3547 	}
3548 
3549 	mutex_lock(&trace_types_lock);
3550 
3551 	for (i = 0; trace_options[i]; i++) {
3552 		if (strcmp(cmp, trace_options[i]) == 0) {
3553 			ret = set_tracer_flag(tr, 1 << i, !neg);
3554 			break;
3555 		}
3556 	}
3557 
3558 	/* If no option could be set, test the specific tracer options */
3559 	if (!trace_options[i])
3560 		ret = set_tracer_option(tr, cmp, neg);
3561 
3562 	mutex_unlock(&trace_types_lock);
3563 
3564 	return ret;
3565 }
3566 
3567 static ssize_t
3568 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3569 			size_t cnt, loff_t *ppos)
3570 {
3571 	struct seq_file *m = filp->private_data;
3572 	struct trace_array *tr = m->private;
3573 	char buf[64];
3574 	int ret;
3575 
3576 	if (cnt >= sizeof(buf))
3577 		return -EINVAL;
3578 
3579 	if (copy_from_user(&buf, ubuf, cnt))
3580 		return -EFAULT;
3581 
3582 	buf[cnt] = 0;
3583 
3584 	ret = trace_set_options(tr, buf);
3585 	if (ret < 0)
3586 		return ret;
3587 
3588 	*ppos += cnt;
3589 
3590 	return cnt;
3591 }
3592 
3593 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3594 {
3595 	struct trace_array *tr = inode->i_private;
3596 	int ret;
3597 
3598 	if (tracing_disabled)
3599 		return -ENODEV;
3600 
3601 	if (trace_array_get(tr) < 0)
3602 		return -ENODEV;
3603 
3604 	ret = single_open(file, tracing_trace_options_show, inode->i_private);
3605 	if (ret < 0)
3606 		trace_array_put(tr);
3607 
3608 	return ret;
3609 }
3610 
3611 static const struct file_operations tracing_iter_fops = {
3612 	.open		= tracing_trace_options_open,
3613 	.read		= seq_read,
3614 	.llseek		= seq_lseek,
3615 	.release	= tracing_single_release_tr,
3616 	.write		= tracing_trace_options_write,
3617 };
3618 
3619 static const char readme_msg[] =
3620 	"tracing mini-HOWTO:\n\n"
3621 	"# echo 0 > tracing_on : quick way to disable tracing\n"
3622 	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3623 	" Important files:\n"
3624 	"  trace\t\t\t- The static contents of the buffer\n"
3625 	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
3626 	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3627 	"  current_tracer\t- function and latency tracers\n"
3628 	"  available_tracers\t- list of configured tracers for current_tracer\n"
3629 	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
3630 	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
3631 	"  trace_clock\t\t- change the clock used to order events\n"
3632 	"       local:   Per cpu clock but may not be synced across CPUs\n"
3633 	"      global:   Synced across CPUs but slows tracing down.\n"
3634 	"     counter:   Not a clock, but just an increment\n"
3635 	"      uptime:   Jiffy counter from time of boot\n"
3636 	"        perf:   Same clock that perf events use\n"
3637 #ifdef CONFIG_X86_64
3638 	"     x86-tsc:   TSC cycle counter\n"
3639 #endif
3640 	"\n  trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
3641 	"  tracing_cpumask\t- Limit which CPUs to trace\n"
3642 	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3643 	"\t\t\t  Remove sub-buffer with rmdir\n"
3644 	"  trace_options\t\t- Set format or modify how tracing happens\n"
3645 	"\t\t\t  Disable an option by adding a suffix 'no' to the\n"
3646 	"\t\t\t  option name\n"
3647 	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3648 #ifdef CONFIG_DYNAMIC_FTRACE
3649 	"\n  available_filter_functions - list of functions that can be filtered on\n"
3650 	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
3651 	"\t\t\t  functions\n"
3652 	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3653 	"\t     modules: Can select a group via module\n"
3654 	"\t      Format: :mod:<module-name>\n"
3655 	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
3656 	"\t    triggers: a command to perform when function is hit\n"
3657 	"\t      Format: <function>:<trigger>[:count]\n"
3658 	"\t     trigger: traceon, traceoff\n"
3659 	"\t\t      enable_event:<system>:<event>\n"
3660 	"\t\t      disable_event:<system>:<event>\n"
3661 #ifdef CONFIG_STACKTRACE
3662 	"\t\t      stacktrace\n"
3663 #endif
3664 #ifdef CONFIG_TRACER_SNAPSHOT
3665 	"\t\t      snapshot\n"
3666 #endif
3667 	"\t\t      dump\n"
3668 	"\t\t      cpudump\n"
3669 	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
3670 	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
3671 	"\t     The first one will disable tracing every time do_fault is hit\n"
3672 	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
3673 	"\t       The first time do_trap is hit and it disables tracing, the\n"
3674 	"\t       counter will decrement to 2. If tracing is already disabled,\n"
3675 	"\t       the counter will not decrement. It only decrements when the\n"
3676 	"\t       trigger did work\n"
3677 	"\t     To remove a trigger without a count:\n"
3678 	"\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
3679 	"\t     To remove a trigger with a count:\n"
3680 	"\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
3681 	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
3682 	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3683 	"\t    modules: Can select a group via module command :mod:\n"
3684 	"\t    Does not accept triggers\n"
3685 #endif /* CONFIG_DYNAMIC_FTRACE */
3686 #ifdef CONFIG_FUNCTION_TRACER
3687 	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3688 	"\t\t    (function)\n"
3689 #endif
3690 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3691 	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3692 	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3693 	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3694 #endif
3695 #ifdef CONFIG_TRACER_SNAPSHOT
3696 	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
3697 	"\t\t\t  snapshot buffer. Read the contents for more\n"
3698 	"\t\t\t  information\n"
3699 #endif
3700 #ifdef CONFIG_STACK_TRACER
3701 	"  stack_trace\t\t- Shows the max stack trace when active\n"
3702 	"  stack_max_size\t- Shows current max stack size that was traced\n"
3703 	"\t\t\t  Write into this file to reset the max size (trigger a\n"
3704 	"\t\t\t  new trace)\n"
3705 #ifdef CONFIG_DYNAMIC_FTRACE
3706 	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3707 	"\t\t\t  traces\n"
3708 #endif
3709 #endif /* CONFIG_STACK_TRACER */
3710 	"  events/\t\t- Directory containing all trace event subsystems:\n"
3711 	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3712 	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
3713 	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3714 	"\t\t\t  events\n"
3715 	"      filter\t\t- If set, only events passing filter are traced\n"
3716 	"  events/<system>/<event>/\t- Directory containing control files for\n"
3717 	"\t\t\t  <event>:\n"
3718 	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3719 	"      filter\t\t- If set, only events passing filter are traced\n"
3720 	"      trigger\t\t- If set, a command to perform when event is hit\n"
3721 	"\t    Format: <trigger>[:count][if <filter>]\n"
3722 	"\t   trigger: traceon, traceoff\n"
3723 	"\t            enable_event:<system>:<event>\n"
3724 	"\t            disable_event:<system>:<event>\n"
3725 #ifdef CONFIG_STACKTRACE
3726 	"\t\t    stacktrace\n"
3727 #endif
3728 #ifdef CONFIG_TRACER_SNAPSHOT
3729 	"\t\t    snapshot\n"
3730 #endif
3731 	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
3732 	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
3733 	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3734 	"\t                  events/block/block_unplug/trigger\n"
3735 	"\t   The first disables tracing every time block_unplug is hit.\n"
3736 	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
3737 	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
3738 	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3739 	"\t   Like function triggers, the counter is only decremented if it\n"
3740 	"\t    enabled or disabled tracing.\n"
3741 	"\t   To remove a trigger without a count:\n"
3742 	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
3743 	"\t   To remove a trigger with a count:\n"
3744 	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
3745 	"\t   Filters can be ignored when removing a trigger.\n"
3746 ;
3747 
3748 static ssize_t
3749 tracing_readme_read(struct file *filp, char __user *ubuf,
3750 		       size_t cnt, loff_t *ppos)
3751 {
3752 	return simple_read_from_buffer(ubuf, cnt, ppos,
3753 					readme_msg, strlen(readme_msg));
3754 }
3755 
3756 static const struct file_operations tracing_readme_fops = {
3757 	.open		= tracing_open_generic,
3758 	.read		= tracing_readme_read,
3759 	.llseek		= generic_file_llseek,
3760 };
3761 
3762 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3763 {
3764 	unsigned int *ptr = v;
3765 
3766 	if (*pos || m->count)
3767 		ptr++;
3768 
3769 	(*pos)++;
3770 
3771 	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3772 	     ptr++) {
3773 		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3774 			continue;
3775 
3776 		return ptr;
3777 	}
3778 
3779 	return NULL;
3780 }
3781 
3782 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3783 {
3784 	void *v;
3785 	loff_t l = 0;
3786 
3787 	preempt_disable();
3788 	arch_spin_lock(&trace_cmdline_lock);
3789 
3790 	v = &savedcmd->map_cmdline_to_pid[0];
3791 	while (l <= *pos) {
3792 		v = saved_cmdlines_next(m, v, &l);
3793 		if (!v)
3794 			return NULL;
3795 	}
3796 
3797 	return v;
3798 }
3799 
3800 static void saved_cmdlines_stop(struct seq_file *m, void *v)
3801 {
3802 	arch_spin_unlock(&trace_cmdline_lock);
3803 	preempt_enable();
3804 }
3805 
3806 static int saved_cmdlines_show(struct seq_file *m, void *v)
3807 {
3808 	char buf[TASK_COMM_LEN];
3809 	unsigned int *pid = v;
3810 
3811 	__trace_find_cmdline(*pid, buf);
3812 	seq_printf(m, "%d %s\n", *pid, buf);
3813 	return 0;
3814 }
3815 
3816 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3817 	.start		= saved_cmdlines_start,
3818 	.next		= saved_cmdlines_next,
3819 	.stop		= saved_cmdlines_stop,
3820 	.show		= saved_cmdlines_show,
3821 };
3822 
3823 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3824 {
3825 	if (tracing_disabled)
3826 		return -ENODEV;
3827 
3828 	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3829 }
3830 
3831 static const struct file_operations tracing_saved_cmdlines_fops = {
3832 	.open		= tracing_saved_cmdlines_open,
3833 	.read		= seq_read,
3834 	.llseek		= seq_lseek,
3835 	.release	= seq_release,
3836 };
3837 
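/*
 * "saved_cmdlines_size" reports and resizes the pid->comm cache used by
 * the file above. The value is a number of entries, e.g. (assuming
 * tracefs/debugfs is mounted in the usual place):
 *
 *   # echo 1024 > /sys/kernel/debug/tracing/saved_cmdlines_size
 */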
3838 static ssize_t
3839 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3840 				 size_t cnt, loff_t *ppos)
3841 {
3842 	char buf[64];
3843 	int r;
3844 
3845 	arch_spin_lock(&trace_cmdline_lock);
3846 	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3847 	arch_spin_unlock(&trace_cmdline_lock);
3848 
3849 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3850 }
3851 
3852 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3853 {
3854 	kfree(s->saved_cmdlines);
3855 	kfree(s->map_cmdline_to_pid);
3856 	kfree(s);
3857 }
3858 
3859 static int tracing_resize_saved_cmdlines(unsigned int val)
3860 {
3861 	struct saved_cmdlines_buffer *s, *savedcmd_temp;
3862 
3863 	s = kmalloc(sizeof(*s), GFP_KERNEL);
3864 	if (!s)
3865 		return -ENOMEM;
3866 
3867 	if (allocate_cmdlines_buffer(val, s) < 0) {
3868 		kfree(s);
3869 		return -ENOMEM;
3870 	}
3871 
3872 	arch_spin_lock(&trace_cmdline_lock);
3873 	savedcmd_temp = savedcmd;
3874 	savedcmd = s;
3875 	arch_spin_unlock(&trace_cmdline_lock);
3876 	free_saved_cmdlines_buffer(savedcmd_temp);
3877 
3878 	return 0;
3879 }
3880 
3881 static ssize_t
3882 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3883 				  size_t cnt, loff_t *ppos)
3884 {
3885 	unsigned long val;
3886 	int ret;
3887 
3888 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3889 	if (ret)
3890 		return ret;
3891 
3892 	/* must have at least 1 entry or less than PID_MAX_DEFAULT */
3893 	/* must have at least 1 entry and no more than PID_MAX_DEFAULT */
3894 		return -EINVAL;
3895 
3896 	ret = tracing_resize_saved_cmdlines((unsigned int)val);
3897 	if (ret < 0)
3898 		return ret;
3899 
3900 	*ppos += cnt;
3901 
3902 	return cnt;
3903 }
3904 
3905 static const struct file_operations tracing_saved_cmdlines_size_fops = {
3906 	.open		= tracing_open_generic,
3907 	.read		= tracing_saved_cmdlines_size_read,
3908 	.write		= tracing_saved_cmdlines_size_write,
3909 };
3910 
3911 static ssize_t
3912 tracing_set_trace_read(struct file *filp, char __user *ubuf,
3913 		       size_t cnt, loff_t *ppos)
3914 {
3915 	struct trace_array *tr = filp->private_data;
3916 	char buf[MAX_TRACER_SIZE+2];
3917 	int r;
3918 
3919 	mutex_lock(&trace_types_lock);
3920 	r = sprintf(buf, "%s\n", tr->current_trace->name);
3921 	mutex_unlock(&trace_types_lock);
3922 
3923 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3924 }
3925 
3926 int tracer_init(struct tracer *t, struct trace_array *tr)
3927 {
3928 	tracing_reset_online_cpus(&tr->trace_buffer);
3929 	return t->init(tr);
3930 }
3931 
3932 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
3933 {
3934 	int cpu;
3935 
3936 	for_each_tracing_cpu(cpu)
3937 		per_cpu_ptr(buf->data, cpu)->entries = val;
3938 }
3939 
3940 #ifdef CONFIG_TRACER_MAX_TRACE
3941 /* resize @tr's buffer to the size of @size_tr's entries */
3942 /* resize @trace_buf's buffer to the size of @size_buf's entries */
3943 					struct trace_buffer *size_buf, int cpu_id)
3944 {
3945 	int cpu, ret = 0;
3946 
3947 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
3948 		for_each_tracing_cpu(cpu) {
3949 			ret = ring_buffer_resize(trace_buf->buffer,
3950 				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
3951 			if (ret < 0)
3952 				break;
3953 			per_cpu_ptr(trace_buf->data, cpu)->entries =
3954 				per_cpu_ptr(size_buf->data, cpu)->entries;
3955 		}
3956 	} else {
3957 		ret = ring_buffer_resize(trace_buf->buffer,
3958 				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
3959 		if (ret == 0)
3960 			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3961 				per_cpu_ptr(size_buf->data, cpu_id)->entries;
3962 	}
3963 
3964 	return ret;
3965 }
3966 #endif /* CONFIG_TRACER_MAX_TRACE */
3967 
3968 static int __tracing_resize_ring_buffer(struct trace_array *tr,
3969 					unsigned long size, int cpu)
3970 {
3971 	int ret;
3972 
3973 	/*
3974 	 * If kernel or user changes the size of the ring buffer
3975 	 * we use the size that was given, and we can forget about
3976 	 * expanding it later.
3977 	 */
3978 	ring_buffer_expanded = true;
3979 
3980 	/* May be called before buffers are initialized */
3981 	if (!tr->trace_buffer.buffer)
3982 		return 0;
3983 
3984 	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
3985 	if (ret < 0)
3986 		return ret;
3987 
3988 #ifdef CONFIG_TRACER_MAX_TRACE
3989 	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3990 	    !tr->current_trace->use_max_tr)
3991 		goto out;
3992 
3993 	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
3994 	if (ret < 0) {
3995 		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3996 						     &tr->trace_buffer, cpu);
3997 		if (r < 0) {
3998 			/*
3999 			 * AARGH! We are left with a different
4000 			 * sized max buffer!!!!
4001 			 * The max buffer is our "snapshot" buffer.
4002 			 * When a tracer needs a snapshot (one of the
4003 			 * latency tracers), it swaps the max buffer
4004 			 * with the saved snapshot. We succeeded in
4005 			 * updating the size of the main buffer, but failed to
4006 			 * update the size of the max buffer. But when we tried
4007 			 * to reset the main buffer to the original size, we
4008 			 * failed there too. This is very unlikely to
4009 			 * happen, but if it does, warn and kill all
4010 			 * tracing.
4011 			 */
4012 			WARN_ON(1);
4013 			tracing_disabled = 1;
4014 		}
4015 		return ret;
4016 	}
4017 
4018 	if (cpu == RING_BUFFER_ALL_CPUS)
4019 		set_buffer_entries(&tr->max_buffer, size);
4020 	else
4021 		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4022 
4023  out:
4024 #endif /* CONFIG_TRACER_MAX_TRACE */
4025 
4026 	if (cpu == RING_BUFFER_ALL_CPUS)
4027 		set_buffer_entries(&tr->trace_buffer, size);
4028 	else
4029 		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4030 
4031 	return ret;
4032 }
4033 
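/*
 * Resize @tr's ring buffer(s) to @size bytes, either for a single CPU or
 * for RING_BUFFER_ALL_CPUS. Callers that expose this through the
 * "buffer_size_kb" file convert from kilobytes before calling in.
 */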
4034 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4035 					  unsigned long size, int cpu_id)
4036 {
4037 	int ret = size;
4038 
4039 	mutex_lock(&trace_types_lock);
4040 
4041 	if (cpu_id != RING_BUFFER_ALL_CPUS) {
4042 		/* make sure, this cpu is enabled in the mask */
4043 		/* make sure this cpu is enabled in the mask */
4044 			ret = -EINVAL;
4045 			goto out;
4046 		}
4047 	}
4048 
4049 	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4050 	if (ret < 0)
4051 		ret = -ENOMEM;
4052 
4053 out:
4054 	mutex_unlock(&trace_types_lock);
4055 
4056 	return ret;
4057 }
4058 
4059 
4060 /**
4061  * tracing_update_buffers - used by tracing facility to expand ring buffers
4062  *
4063  * To save memory on systems where tracing is configured in but never
4064  * used, the ring buffers are set to a minimum size. Once a user
4065  * starts to use the tracing facility, they need to grow to their
4066  * default size.
4067  *
4068  * This function is to be called when a tracer is about to be used.
4069  */
4070 int tracing_update_buffers(void)
4071 {
4072 	int ret = 0;
4073 
4074 	mutex_lock(&trace_types_lock);
4075 	if (!ring_buffer_expanded)
4076 		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4077 						RING_BUFFER_ALL_CPUS);
4078 	mutex_unlock(&trace_types_lock);
4079 
4080 	return ret;
4081 }
4082 
4083 struct trace_option_dentry;
4084 
4085 static struct trace_option_dentry *
4086 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4087 
4088 static void
4089 destroy_trace_option_files(struct trace_option_dentry *topts);
4090 
4091 /*
4092  * Used to clear out the tracer before deletion of an instance.
4093  * Must have trace_types_lock held.
4094  */
4095 static void tracing_set_nop(struct trace_array *tr)
4096 {
4097 	if (tr->current_trace == &nop_trace)
4098 		return;
4099 
4100 	tr->current_trace->enabled--;
4101 
4102 	if (tr->current_trace->reset)
4103 		tr->current_trace->reset(tr);
4104 
4105 	tr->current_trace = &nop_trace;
4106 }
4107 
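/*
 * Switch @tr to the tracer named @buf (as written to "current_tracer").
 * The old tracer is shut down (reset called, snapshot buffer freed if
 * the new tracer does not need it) while current_trace points at
 * nop_trace, and the new tracer is then initialized. Fails with -EBUSY
 * if a trace_pipe reader still holds a reference to the current tracer.
 */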
4108 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4109 {
4110 	static struct trace_option_dentry *topts;
4111 	struct tracer *t;
4112 #ifdef CONFIG_TRACER_MAX_TRACE
4113 	bool had_max_tr;
4114 #endif
4115 	int ret = 0;
4116 
4117 	mutex_lock(&trace_types_lock);
4118 
4119 	if (!ring_buffer_expanded) {
4120 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4121 						RING_BUFFER_ALL_CPUS);
4122 		if (ret < 0)
4123 			goto out;
4124 		ret = 0;
4125 	}
4126 
4127 	for (t = trace_types; t; t = t->next) {
4128 		if (strcmp(t->name, buf) == 0)
4129 			break;
4130 	}
4131 	if (!t) {
4132 		ret = -EINVAL;
4133 		goto out;
4134 	}
4135 	if (t == tr->current_trace)
4136 		goto out;
4137 
4138 	/* Some tracers are only allowed for the top level buffer */
4139 	if (!trace_ok_for_array(t, tr)) {
4140 		ret = -EINVAL;
4141 		goto out;
4142 	}
4143 
4144 	/* If trace pipe files are being read, we can't change the tracer */
4145 	if (tr->current_trace->ref) {
4146 		ret = -EBUSY;
4147 		goto out;
4148 	}
4149 
4150 	trace_branch_disable();
4151 
4152 	tr->current_trace->enabled--;
4153 
4154 	if (tr->current_trace->reset)
4155 		tr->current_trace->reset(tr);
4156 
4157 	/* Current trace needs to be nop_trace before synchronize_sched */
4158 	tr->current_trace = &nop_trace;
4159 
4160 #ifdef CONFIG_TRACER_MAX_TRACE
4161 	had_max_tr = tr->allocated_snapshot;
4162 
4163 	if (had_max_tr && !t->use_max_tr) {
4164 		/*
4165 		 * We need to make sure that the update_max_tr sees that
4166 		 * current_trace changed to nop_trace to keep it from
4167 		 * swapping the buffers after we resize it.
4168 		 * The update_max_tr is called with interrupts disabled,
4169 		 * so a synchronize_sched() is sufficient.
4170 		 */
4171 		synchronize_sched();
4172 		free_snapshot(tr);
4173 	}
4174 #endif
4175 	/* Currently, only the top instance has options */
4176 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4177 		destroy_trace_option_files(topts);
4178 		topts = create_trace_option_files(tr, t);
4179 	}
4180 
4181 #ifdef CONFIG_TRACER_MAX_TRACE
4182 	if (t->use_max_tr && !had_max_tr) {
4183 		ret = alloc_snapshot(tr);
4184 		if (ret < 0)
4185 			goto out;
4186 	}
4187 #endif
4188 
4189 	if (t->init) {
4190 		ret = tracer_init(t, tr);
4191 		if (ret)
4192 			goto out;
4193 	}
4194 
4195 	tr->current_trace = t;
4196 	tr->current_trace->enabled++;
4197 	trace_branch_enable(tr);
4198  out:
4199 	mutex_unlock(&trace_types_lock);
4200 
4201 	return ret;
4202 }
4203 
4204 static ssize_t
4205 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4206 			size_t cnt, loff_t *ppos)
4207 {
4208 	struct trace_array *tr = filp->private_data;
4209 	char buf[MAX_TRACER_SIZE+1];
4210 	int i;
4211 	size_t ret;
4212 	int err;
4213 
4214 	ret = cnt;
4215 
4216 	if (cnt > MAX_TRACER_SIZE)
4217 		cnt = MAX_TRACER_SIZE;
4218 
4219 	if (copy_from_user(&buf, ubuf, cnt))
4220 		return -EFAULT;
4221 
4222 	buf[cnt] = 0;
4223 
4224 	/* strip ending whitespace. */
4225 	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4226 		buf[i] = 0;
4227 
4228 	err = tracing_set_tracer(tr, buf);
4229 	if (err)
4230 		return err;
4231 
4232 	*ppos += ret;
4233 
4234 	return ret;
4235 }
4236 
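/*
 * Helpers for the "tracing_thresh" and "tracing_max_latency" files (see
 * the read/write wrappers below): the values are stored in nanoseconds
 * but read and written in microseconds; a stored -1 is shown as -1.
 */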
4237 static ssize_t
4238 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4239 		   size_t cnt, loff_t *ppos)
4240 {
4241 	char buf[64];
4242 	int r;
4243 
4244 	r = snprintf(buf, sizeof(buf), "%ld\n",
4245 		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4246 	if (r > sizeof(buf))
4247 		r = sizeof(buf);
4248 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4249 }
4250 
4251 static ssize_t
4252 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4253 		    size_t cnt, loff_t *ppos)
4254 {
4255 	unsigned long val;
4256 	int ret;
4257 
4258 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4259 	if (ret)
4260 		return ret;
4261 
4262 	*ptr = val * 1000;
4263 
4264 	return cnt;
4265 }
4266 
4267 static ssize_t
4268 tracing_thresh_read(struct file *filp, char __user *ubuf,
4269 		    size_t cnt, loff_t *ppos)
4270 {
4271 	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4272 }
4273 
4274 static ssize_t
4275 tracing_thresh_write(struct file *filp, const char __user *ubuf,
4276 		     size_t cnt, loff_t *ppos)
4277 {
4278 	struct trace_array *tr = filp->private_data;
4279 	int ret;
4280 
4281 	mutex_lock(&trace_types_lock);
4282 	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4283 	if (ret < 0)
4284 		goto out;
4285 
4286 	if (tr->current_trace->update_thresh) {
4287 		ret = tr->current_trace->update_thresh(tr);
4288 		if (ret < 0)
4289 			goto out;
4290 	}
4291 
4292 	ret = cnt;
4293 out:
4294 	mutex_unlock(&trace_types_lock);
4295 
4296 	return ret;
4297 }
4298 
4299 static ssize_t
4300 tracing_max_lat_read(struct file *filp, char __user *ubuf,
4301 		     size_t cnt, loff_t *ppos)
4302 {
4303 	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4304 }
4305 
4306 static ssize_t
4307 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4308 		      size_t cnt, loff_t *ppos)
4309 {
4310 	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4311 }
4312 
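/*
 * "trace_pipe" is a consuming reader: events are removed from the ring
 * buffer as they are read, and reads block until data is available
 * (unless the file is opened O_NONBLOCK). Each open gets its own
 * iterator, and the current tracer is pinned (->ref) while any pipe
 * reader exists.
 */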
4313 static int tracing_open_pipe(struct inode *inode, struct file *filp)
4314 {
4315 	struct trace_array *tr = inode->i_private;
4316 	struct trace_iterator *iter;
4317 	int ret = 0;
4318 
4319 	if (tracing_disabled)
4320 		return -ENODEV;
4321 
4322 	if (trace_array_get(tr) < 0)
4323 		return -ENODEV;
4324 
4325 	mutex_lock(&trace_types_lock);
4326 
4327 	/* create a buffer to store the information to pass to userspace */
4328 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4329 	if (!iter) {
4330 		ret = -ENOMEM;
4331 		__trace_array_put(tr);
4332 		goto out;
4333 	}
4334 
4335 	trace_seq_init(&iter->seq);
4336 	iter->trace = tr->current_trace;
4337 
4338 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4339 		ret = -ENOMEM;
4340 		goto fail;
4341 	}
4342 
4343 	/* trace pipe does not show start of buffer */
4344 	cpumask_setall(iter->started);
4345 
4346 	if (trace_flags & TRACE_ITER_LATENCY_FMT)
4347 		iter->iter_flags |= TRACE_FILE_LAT_FMT;
4348 
4349 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
4350 	if (trace_clocks[tr->clock_id].in_ns)
4351 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4352 
4353 	iter->tr = tr;
4354 	iter->trace_buffer = &tr->trace_buffer;
4355 	iter->cpu_file = tracing_get_cpu(inode);
4356 	mutex_init(&iter->mutex);
4357 	filp->private_data = iter;
4358 
4359 	if (iter->trace->pipe_open)
4360 		iter->trace->pipe_open(iter);
4361 
4362 	nonseekable_open(inode, filp);
4363 
4364 	tr->current_trace->ref++;
4365 out:
4366 	mutex_unlock(&trace_types_lock);
4367 	return ret;
4368 
4369 fail:
4370 	kfree(iter->trace);
4371 	kfree(iter);
4372 	__trace_array_put(tr);
4373 	mutex_unlock(&trace_types_lock);
4374 	return ret;
4375 }
4376 
4377 static int tracing_release_pipe(struct inode *inode, struct file *file)
4378 {
4379 	struct trace_iterator *iter = file->private_data;
4380 	struct trace_array *tr = inode->i_private;
4381 
4382 	mutex_lock(&trace_types_lock);
4383 
4384 	tr->current_trace->ref--;
4385 
4386 	if (iter->trace->pipe_close)
4387 		iter->trace->pipe_close(iter);
4388 
4389 	mutex_unlock(&trace_types_lock);
4390 
4391 	free_cpumask_var(iter->started);
4392 	mutex_destroy(&iter->mutex);
4393 	kfree(iter);
4394 
4395 	trace_array_put(tr);
4396 
4397 	return 0;
4398 }
4399 
4400 static unsigned int
4401 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4402 {
4403 	/* Iterators are static, they should be filled or empty */
4404 	if (trace_buffer_iter(iter, iter->cpu_file))
4405 		return POLLIN | POLLRDNORM;
4406 
4407 	if (trace_flags & TRACE_ITER_BLOCK)
4408 		/*
4409 		 * Always select as readable when in blocking mode
4410 		 */
4411 		return POLLIN | POLLRDNORM;
4412 	else
4413 		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4414 					     filp, poll_table);
4415 }
4416 
4417 static unsigned int
4418 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4419 {
4420 	struct trace_iterator *iter = filp->private_data;
4421 
4422 	return trace_poll(iter, filp, poll_table);
4423 }
4424 
4425 /* Must be called with iter->mutex held. */
4426 static int tracing_wait_pipe(struct file *filp)
4427 {
4428 	struct trace_iterator *iter = filp->private_data;
4429 	int ret;
4430 
4431 	while (trace_empty(iter)) {
4432 
4433 		if ((filp->f_flags & O_NONBLOCK)) {
4434 			return -EAGAIN;
4435 		}
4436 
4437 		/*
4438 		 * We block until we have read something and tracing is
4439 		 * disabled. We still block if tracing is disabled but we
4440 		 * have never read anything. This allows a user to cat this
4441 		 * file, and then enable tracing. But after we have read
4442 		 * something, we give an EOF when tracing is disabled again.
4443 		 *
4444 		 * iter->pos will be 0 if we haven't read anything.
4445 		 */
4446 		if (!tracing_is_on() && iter->pos)
4447 			break;
4448 
4449 		mutex_unlock(&iter->mutex);
4450 
4451 		ret = wait_on_pipe(iter, false);
4452 
4453 		mutex_lock(&iter->mutex);
4454 
4455 		if (ret)
4456 			return ret;
4457 	}
4458 
4459 	return 1;
4460 }
4461 
4462 /*
4463  * Consumer reader.
4464  */
4465 static ssize_t
4466 tracing_read_pipe(struct file *filp, char __user *ubuf,
4467 		  size_t cnt, loff_t *ppos)
4468 {
4469 	struct trace_iterator *iter = filp->private_data;
4470 	ssize_t sret;
4471 
4472 	/* return any leftover data */
4473 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4474 	if (sret != -EBUSY)
4475 		return sret;
4476 
4477 	trace_seq_init(&iter->seq);
4478 
4479 	/*
4480 	 * Avoid more than one consumer on a single file descriptor
4481 	 * This is just a matter of traces coherency, the ring buffer itself
4482 	 * is protected.
4483 	 */
4484 	mutex_lock(&iter->mutex);
4485 	if (iter->trace->read) {
4486 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4487 		if (sret)
4488 			goto out;
4489 	}
4490 
4491 waitagain:
4492 	sret = tracing_wait_pipe(filp);
4493 	if (sret <= 0)
4494 		goto out;
4495 
4496 	/* stop when tracing is finished */
4497 	if (trace_empty(iter)) {
4498 		sret = 0;
4499 		goto out;
4500 	}
4501 
4502 	if (cnt >= PAGE_SIZE)
4503 		cnt = PAGE_SIZE - 1;
4504 
4505 	/* reset all but tr, trace, and overruns */
4506 	memset(&iter->seq, 0,
4507 	       sizeof(struct trace_iterator) -
4508 	       offsetof(struct trace_iterator, seq));
4509 	cpumask_clear(iter->started);
4510 	iter->pos = -1;
4511 
4512 	trace_event_read_lock();
4513 	trace_access_lock(iter->cpu_file);
4514 	while (trace_find_next_entry_inc(iter) != NULL) {
4515 		enum print_line_t ret;
4516 		int save_len = iter->seq.seq.len;
4517 
4518 		ret = print_trace_line(iter);
4519 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
4520 			/* don't print partial lines */
4521 			iter->seq.seq.len = save_len;
4522 			break;
4523 		}
4524 		if (ret != TRACE_TYPE_NO_CONSUME)
4525 			trace_consume(iter);
4526 
4527 		if (trace_seq_used(&iter->seq) >= cnt)
4528 			break;
4529 
4530 		/*
4531 		 * Setting the full flag means we reached the trace_seq buffer
4532 		 * size, so we should have left via the partial output check
4533 		 * above. One of the trace_seq_* functions is not used properly.
4534 		 */
4535 		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4536 			  iter->ent->type);
4537 	}
4538 	trace_access_unlock(iter->cpu_file);
4539 	trace_event_read_unlock();
4540 
4541 	/* Now copy what we have to the user */
4542 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4543 	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4544 		trace_seq_init(&iter->seq);
4545 
4546 	/*
4547 	 * If there was nothing to send to user, in spite of consuming trace
4548 	 * entries, go back to wait for more entries.
4549 	 */
4550 	if (sret == -EBUSY)
4551 		goto waitagain;
4552 
4553 out:
4554 	mutex_unlock(&iter->mutex);
4555 
4556 	return sret;
4557 }
4558 
4559 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4560 				     unsigned int idx)
4561 {
4562 	__free_page(spd->pages[idx]);
4563 }
4564 
4565 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4566 	.can_merge		= 0,
4567 	.confirm		= generic_pipe_buf_confirm,
4568 	.release		= generic_pipe_buf_release,
4569 	.steal			= generic_pipe_buf_steal,
4570 	.get			= generic_pipe_buf_get,
4571 };
4572 
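/*
 * Fill iter->seq with up to one page worth of formatted trace output
 * for the splice path below, consuming entries as they are formatted.
 * Returns how much of @rem is still left to fill.
 */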
4573 static size_t
4574 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4575 {
4576 	size_t count;
4577 	int save_len;
4578 	int ret;
4579 
4580 	/* Seq buffer is page-sized, exactly what we need. */
4581 	for (;;) {
4582 		save_len = iter->seq.seq.len;
4583 		ret = print_trace_line(iter);
4584 
4585 		if (trace_seq_has_overflowed(&iter->seq)) {
4586 			iter->seq.seq.len = save_len;
4587 			break;
4588 		}
4589 
4590 		/*
4591 		 * This should not be hit, because it should only
4592 		 * be set if the iter->seq overflowed. But check it
4593 		 * anyway to be safe.
4594 		 */
4595 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
4596 			iter->seq.seq.len = save_len;
4597 			break;
4598 		}
4599 
4600 		count = trace_seq_used(&iter->seq) - save_len;
4601 		if (rem < count) {
4602 			rem = 0;
4603 			iter->seq.seq.len = save_len;
4604 			break;
4605 		}
4606 
4607 		if (ret != TRACE_TYPE_NO_CONSUME)
4608 			trace_consume(iter);
4609 		rem -= count;
4610 		if (!trace_find_next_entry_inc(iter))	{
4611 			rem = 0;
4612 			iter->ent = NULL;
4613 			break;
4614 		}
4615 	}
4616 
4617 	return rem;
4618 }
4619 
4620 static ssize_t tracing_splice_read_pipe(struct file *filp,
4621 					loff_t *ppos,
4622 					struct pipe_inode_info *pipe,
4623 					size_t len,
4624 					unsigned int flags)
4625 {
4626 	struct page *pages_def[PIPE_DEF_BUFFERS];
4627 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
4628 	struct trace_iterator *iter = filp->private_data;
4629 	struct splice_pipe_desc spd = {
4630 		.pages		= pages_def,
4631 		.partial	= partial_def,
4632 		.nr_pages	= 0, /* This gets updated below. */
4633 		.nr_pages_max	= PIPE_DEF_BUFFERS,
4634 		.flags		= flags,
4635 		.ops		= &tracing_pipe_buf_ops,
4636 		.spd_release	= tracing_spd_release_pipe,
4637 	};
4638 	ssize_t ret;
4639 	size_t rem;
4640 	unsigned int i;
4641 
4642 	if (splice_grow_spd(pipe, &spd))
4643 		return -ENOMEM;
4644 
4645 	mutex_lock(&iter->mutex);
4646 
4647 	if (iter->trace->splice_read) {
4648 		ret = iter->trace->splice_read(iter, filp,
4649 					       ppos, pipe, len, flags);
4650 		if (ret)
4651 			goto out_err;
4652 	}
4653 
4654 	ret = tracing_wait_pipe(filp);
4655 	if (ret <= 0)
4656 		goto out_err;
4657 
4658 	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4659 		ret = -EFAULT;
4660 		goto out_err;
4661 	}
4662 
4663 	trace_event_read_lock();
4664 	trace_access_lock(iter->cpu_file);
4665 
4666 	/* Fill as many pages as possible. */
4667 	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4668 		spd.pages[i] = alloc_page(GFP_KERNEL);
4669 		if (!spd.pages[i])
4670 			break;
4671 
4672 		rem = tracing_fill_pipe_page(rem, iter);
4673 
4674 		/* Copy the data into the page, so we can start over. */
4675 		ret = trace_seq_to_buffer(&iter->seq,
4676 					  page_address(spd.pages[i]),
4677 					  trace_seq_used(&iter->seq));
4678 		if (ret < 0) {
4679 			__free_page(spd.pages[i]);
4680 			break;
4681 		}
4682 		spd.partial[i].offset = 0;
4683 		spd.partial[i].len = trace_seq_used(&iter->seq);
4684 
4685 		trace_seq_init(&iter->seq);
4686 	}
4687 
4688 	trace_access_unlock(iter->cpu_file);
4689 	trace_event_read_unlock();
4690 	mutex_unlock(&iter->mutex);
4691 
4692 	spd.nr_pages = i;
4693 
4694 	ret = splice_to_pipe(pipe, &spd);
4695 out:
4696 	splice_shrink_spd(&spd);
4697 	return ret;
4698 
4699 out_err:
4700 	mutex_unlock(&iter->mutex);
4701 	goto out;
4702 }
4703 
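/*
 * "buffer_size_kb": the per-cpu files report that CPU's ring buffer size
 * in kilobytes; the top level file reports the common size, or "X" when
 * the per-cpu sizes differ. Writing a value resizes the buffer(s), e.g.
 * (assuming the usual tracefs/debugfs mount point):
 *
 *   # echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 */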
4704 static ssize_t
4705 tracing_entries_read(struct file *filp, char __user *ubuf,
4706 		     size_t cnt, loff_t *ppos)
4707 {
4708 	struct inode *inode = file_inode(filp);
4709 	struct trace_array *tr = inode->i_private;
4710 	int cpu = tracing_get_cpu(inode);
4711 	char buf[64];
4712 	int r = 0;
4713 	ssize_t ret;
4714 
4715 	mutex_lock(&trace_types_lock);
4716 
4717 	if (cpu == RING_BUFFER_ALL_CPUS) {
4718 		int cpu, buf_size_same;
4719 		unsigned long size;
4720 
4721 		size = 0;
4722 		buf_size_same = 1;
4723 		/* check if all cpu sizes are same */
4724 		for_each_tracing_cpu(cpu) {
4725 			/* fill in the size from first enabled cpu */
4726 			if (size == 0)
4727 				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4728 			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4729 				buf_size_same = 0;
4730 				break;
4731 			}
4732 		}
4733 
4734 		if (buf_size_same) {
4735 			if (!ring_buffer_expanded)
4736 				r = sprintf(buf, "%lu (expanded: %lu)\n",
4737 					    size >> 10,
4738 					    trace_buf_size >> 10);
4739 			else
4740 				r = sprintf(buf, "%lu\n", size >> 10);
4741 		} else
4742 			r = sprintf(buf, "X\n");
4743 	} else
4744 		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
4745 
4746 	mutex_unlock(&trace_types_lock);
4747 
4748 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4749 	return ret;
4750 }
4751 
4752 static ssize_t
4753 tracing_entries_write(struct file *filp, const char __user *ubuf,
4754 		      size_t cnt, loff_t *ppos)
4755 {
4756 	struct inode *inode = file_inode(filp);
4757 	struct trace_array *tr = inode->i_private;
4758 	unsigned long val;
4759 	int ret;
4760 
4761 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4762 	if (ret)
4763 		return ret;
4764 
4765 	/* must have at least 1 entry */
4766 	if (!val)
4767 		return -EINVAL;
4768 
4769 	/* value is in KB */
4770 	val <<= 10;
4771 	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4772 	if (ret < 0)
4773 		return ret;
4774 
4775 	*ppos += cnt;
4776 
4777 	return cnt;
4778 }
4779 
4780 static ssize_t
4781 tracing_total_entries_read(struct file *filp, char __user *ubuf,
4782 				size_t cnt, loff_t *ppos)
4783 {
4784 	struct trace_array *tr = filp->private_data;
4785 	char buf[64];
4786 	int r, cpu;
4787 	unsigned long size = 0, expanded_size = 0;
4788 
4789 	mutex_lock(&trace_types_lock);
4790 	for_each_tracing_cpu(cpu) {
4791 		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
4792 		if (!ring_buffer_expanded)
4793 			expanded_size += trace_buf_size >> 10;
4794 	}
4795 	if (ring_buffer_expanded)
4796 		r = sprintf(buf, "%lu\n", size);
4797 	else
4798 		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4799 	mutex_unlock(&trace_types_lock);
4800 
4801 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4802 }
4803 
4804 static ssize_t
4805 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4806 			  size_t cnt, loff_t *ppos)
4807 {
4808 	/*
4809 	 * There is no need to read what the user has written; this function
4810 	 * just makes sure that there is no error when "echo" is used.
4811 	 */
4812 
4813 	*ppos += cnt;
4814 
4815 	return cnt;
4816 }
4817 
4818 static int
4819 tracing_free_buffer_release(struct inode *inode, struct file *filp)
4820 {
4821 	struct trace_array *tr = inode->i_private;
4822 
4823 	/* disable tracing ? */
4824 	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4825 		tracer_tracing_off(tr);
4826 	/* resize the ring buffer to 0 */
4827 	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4828 
4829 	trace_array_put(tr);
4830 
4831 	return 0;
4832 }
4833 
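/*
 * "trace_marker": writes from user space are injected into the ring
 * buffer as TRACE_PRINT events, e.g. (assuming the usual tracefs mount):
 *
 *   # echo "hello from user space" > /sys/kernel/debug/tracing/trace_marker
 *
 * Writes are truncated to TRACE_BUF_SIZE, and a trailing newline is
 * added if the data does not already end with one.
 */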
4834 static ssize_t
4835 tracing_mark_write(struct file *filp, const char __user *ubuf,
4836 					size_t cnt, loff_t *fpos)
4837 {
4838 	unsigned long addr = (unsigned long)ubuf;
4839 	struct trace_array *tr = filp->private_data;
4840 	struct ring_buffer_event *event;
4841 	struct ring_buffer *buffer;
4842 	struct print_entry *entry;
4843 	unsigned long irq_flags;
4844 	struct page *pages[2];
4845 	void *map_page[2];
4846 	int nr_pages = 1;
4847 	ssize_t written;
4848 	int offset;
4849 	int size;
4850 	int len;
4851 	int ret;
4852 	int i;
4853 
4854 	if (tracing_disabled)
4855 		return -EINVAL;
4856 
4857 	if (!(trace_flags & TRACE_ITER_MARKERS))
4858 		return -EINVAL;
4859 
4860 	if (cnt > TRACE_BUF_SIZE)
4861 		cnt = TRACE_BUF_SIZE;
4862 
4863 	/*
4864 	 * Userspace is injecting traces into the kernel trace buffer.
4865 	 * We want to be as non-intrusive as possible.
4866 	 * To do so, we do not want to allocate any special buffers
4867 	 * or take any locks, but instead write the userspace data
4868 	 * straight into the ring buffer.
4869 	 *
4870 	 * First we need to pin the userspace buffer into memory,
4871 	 * which most likely it already is, because userspace just referenced it.
4872 	 * But there's no guarantee that it is. By using get_user_pages_fast()
4873 	 * and kmap_atomic/kunmap_atomic() we can get access to the
4874 	 * pages directly. We then write the data directly into the
4875 	 * ring buffer.
4876 	 */
4877 	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
4878 
4879 	/* check if we cross pages */
4880 	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4881 		nr_pages = 2;
4882 
4883 	offset = addr & (PAGE_SIZE - 1);
4884 	addr &= PAGE_MASK;
4885 
4886 	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4887 	if (ret < nr_pages) {
4888 		while (--ret >= 0)
4889 			put_page(pages[ret]);
4890 		written = -EFAULT;
4891 		goto out;
4892 	}
4893 
4894 	for (i = 0; i < nr_pages; i++)
4895 		map_page[i] = kmap_atomic(pages[i]);
4896 
4897 	local_save_flags(irq_flags);
4898 	size = sizeof(*entry) + cnt + 2; /* possible \n added */
4899 	buffer = tr->trace_buffer.buffer;
4900 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4901 					  irq_flags, preempt_count());
4902 	if (!event) {
4903 		/* Ring buffer disabled, return as if not open for write */
4904 		written = -EBADF;
4905 		goto out_unlock;
4906 	}
4907 
4908 	entry = ring_buffer_event_data(event);
4909 	entry->ip = _THIS_IP_;
4910 
4911 	if (nr_pages == 2) {
4912 		len = PAGE_SIZE - offset;
4913 		memcpy(&entry->buf, map_page[0] + offset, len);
4914 		memcpy(&entry->buf[len], map_page[1], cnt - len);
4915 	} else
4916 		memcpy(&entry->buf, map_page[0] + offset, cnt);
4917 
4918 	if (entry->buf[cnt - 1] != '\n') {
4919 		entry->buf[cnt] = '\n';
4920 		entry->buf[cnt + 1] = '\0';
4921 	} else
4922 		entry->buf[cnt] = '\0';
4923 
4924 	__buffer_unlock_commit(buffer, event);
4925 
4926 	written = cnt;
4927 
4928 	*fpos += written;
4929 
4930  out_unlock:
4931 	for (i = nr_pages - 1; i >= 0; i--) {
4932 		kunmap_atomic(map_page[i]);
4933 		put_page(pages[i]);
4934 	}
4935  out:
4936 	return written;
4937 }
4938 
4939 static int tracing_clock_show(struct seq_file *m, void *v)
4940 {
4941 	struct trace_array *tr = m->private;
4942 	int i;
4943 
4944 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4945 		seq_printf(m,
4946 			"%s%s%s%s", i ? " " : "",
4947 			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4948 			i == tr->clock_id ? "]" : "");
4949 	seq_putc(m, '\n');
4950 
4951 	return 0;
4952 }
4953 
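/*
 * Set the trace clock of @tr to the clock named @clockstr (one of the
 * entries in trace_clocks[], e.g. "local", "global" or "counter"), as
 * written to the "trace_clock" file:
 *
 *   # echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * The buffers are reset because timestamps taken with different clocks
 * are not comparable.
 */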
4954 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
4955 {
4956 	int i;
4957 
4958 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4959 		if (strcmp(trace_clocks[i].name, clockstr) == 0)
4960 			break;
4961 	}
4962 	if (i == ARRAY_SIZE(trace_clocks))
4963 		return -EINVAL;
4964 
4965 	mutex_lock(&trace_types_lock);
4966 
4967 	tr->clock_id = i;
4968 
4969 	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
4970 
4971 	/*
4972 	 * New clock may not be consistent with the previous clock.
4973 	 * Reset the buffer so that it doesn't have incomparable timestamps.
4974 	 */
4975 	tracing_reset_online_cpus(&tr->trace_buffer);
4976 
4977 #ifdef CONFIG_TRACER_MAX_TRACE
4978 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4979 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4980 	tracing_reset_online_cpus(&tr->max_buffer);
4981 #endif
4982 
4983 	mutex_unlock(&trace_types_lock);
4984 
4985 	return 0;
4986 }
4987 
4988 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4989 				   size_t cnt, loff_t *fpos)
4990 {
4991 	struct seq_file *m = filp->private_data;
4992 	struct trace_array *tr = m->private;
4993 	char buf[64];
4994 	const char *clockstr;
4995 	int ret;
4996 
4997 	if (cnt >= sizeof(buf))
4998 		return -EINVAL;
4999 
5000 	if (copy_from_user(&buf, ubuf, cnt))
5001 		return -EFAULT;
5002 
5003 	buf[cnt] = 0;
5004 
5005 	clockstr = strstrip(buf);
5006 
5007 	ret = tracing_set_clock(tr, clockstr);
5008 	if (ret)
5009 		return ret;
5010 
5011 	*fpos += cnt;
5012 
5013 	return cnt;
5014 }
5015 
5016 static int tracing_clock_open(struct inode *inode, struct file *file)
5017 {
5018 	struct trace_array *tr = inode->i_private;
5019 	int ret;
5020 
5021 	if (tracing_disabled)
5022 		return -ENODEV;
5023 
5024 	if (trace_array_get(tr))
5025 		return -ENODEV;
5026 
5027 	ret = single_open(file, tracing_clock_show, inode->i_private);
5028 	if (ret < 0)
5029 		trace_array_put(tr);
5030 
5031 	return ret;
5032 }
5033 
5034 struct ftrace_buffer_info {
5035 	struct trace_iterator	iter;
5036 	void			*spare;
5037 	unsigned int		read;
5038 };
5039 
5040 #ifdef CONFIG_TRACER_SNAPSHOT
5041 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5042 {
5043 	struct trace_array *tr = inode->i_private;
5044 	struct trace_iterator *iter;
5045 	struct seq_file *m;
5046 	int ret = 0;
5047 
5048 	if (trace_array_get(tr) < 0)
5049 		return -ENODEV;
5050 
5051 	if (file->f_mode & FMODE_READ) {
5052 		iter = __tracing_open(inode, file, true);
5053 		if (IS_ERR(iter))
5054 			ret = PTR_ERR(iter);
5055 	} else {
5056 		/* Writes still need the seq_file to hold the private data */
5057 		ret = -ENOMEM;
5058 		m = kzalloc(sizeof(*m), GFP_KERNEL);
5059 		if (!m)
5060 			goto out;
5061 		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5062 		if (!iter) {
5063 			kfree(m);
5064 			goto out;
5065 		}
5066 		ret = 0;
5067 
5068 		iter->tr = tr;
5069 		iter->trace_buffer = &tr->max_buffer;
5070 		iter->cpu_file = tracing_get_cpu(inode);
5071 		m->private = iter;
5072 		file->private_data = m;
5073 	}
5074 out:
5075 	if (ret < 0)
5076 		trace_array_put(tr);
5077 
5078 	return ret;
5079 }
5080 
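/*
 * Writes to the "snapshot" file control the snapshot (max) buffer:
 * "0" frees it, "1" allocates it if needed and takes a snapshot by
 * swapping it with the live buffer, and any other value clears its
 * contents. Freeing is only allowed from the top level file, and a
 * per-cpu swap requires CONFIG_RING_BUFFER_ALLOW_SWAP.
 */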
5081 static ssize_t
5082 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5083 		       loff_t *ppos)
5084 {
5085 	struct seq_file *m = filp->private_data;
5086 	struct trace_iterator *iter = m->private;
5087 	struct trace_array *tr = iter->tr;
5088 	unsigned long val;
5089 	int ret;
5090 
5091 	ret = tracing_update_buffers();
5092 	if (ret < 0)
5093 		return ret;
5094 
5095 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5096 	if (ret)
5097 		return ret;
5098 
5099 	mutex_lock(&trace_types_lock);
5100 
5101 	if (tr->current_trace->use_max_tr) {
5102 		ret = -EBUSY;
5103 		goto out;
5104 	}
5105 
5106 	switch (val) {
5107 	case 0:
5108 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5109 			ret = -EINVAL;
5110 			break;
5111 		}
5112 		if (tr->allocated_snapshot)
5113 			free_snapshot(tr);
5114 		break;
5115 	case 1:
5116 /* Only allow per-cpu swap if the ring buffer supports it */
5117 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5118 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5119 			ret = -EINVAL;
5120 			break;
5121 		}
5122 #endif
5123 		if (!tr->allocated_snapshot) {
5124 			ret = alloc_snapshot(tr);
5125 			if (ret < 0)
5126 				break;
5127 		}
5128 		local_irq_disable();
5129 		/* Now, we're going to swap */
5130 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5131 			update_max_tr(tr, current, smp_processor_id());
5132 		else
5133 			update_max_tr_single(tr, current, iter->cpu_file);
5134 		local_irq_enable();
5135 		break;
5136 	default:
5137 		if (tr->allocated_snapshot) {
5138 			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5139 				tracing_reset_online_cpus(&tr->max_buffer);
5140 			else
5141 				tracing_reset(&tr->max_buffer, iter->cpu_file);
5142 		}
5143 		break;
5144 	}
5145 
5146 	if (ret >= 0) {
5147 		*ppos += cnt;
5148 		ret = cnt;
5149 	}
5150 out:
5151 	mutex_unlock(&trace_types_lock);
5152 	return ret;
5153 }
5154 
5155 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5156 {
5157 	struct seq_file *m = file->private_data;
5158 	int ret;
5159 
5160 	ret = tracing_release(inode, file);
5161 
5162 	if (file->f_mode & FMODE_READ)
5163 		return ret;
5164 
5165 	/* If write only, the seq_file is just a stub */
5166 	if (m)
5167 		kfree(m->private);
5168 	kfree(m);
5169 
5170 	return 0;
5171 }
5172 
5173 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5174 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5175 				    size_t count, loff_t *ppos);
5176 static int tracing_buffers_release(struct inode *inode, struct file *file);
5177 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5178 		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5179 
5180 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5181 {
5182 	struct ftrace_buffer_info *info;
5183 	int ret;
5184 
5185 	ret = tracing_buffers_open(inode, filp);
5186 	if (ret < 0)
5187 		return ret;
5188 
5189 	info = filp->private_data;
5190 
5191 	if (info->iter.trace->use_max_tr) {
5192 		tracing_buffers_release(inode, filp);
5193 		return -EBUSY;
5194 	}
5195 
5196 	info->iter.snapshot = true;
5197 	info->iter.trace_buffer = &info->iter.tr->max_buffer;
5198 
5199 	return ret;
5200 }
5201 
5202 #endif /* CONFIG_TRACER_SNAPSHOT */
5203 
5204 
5205 static const struct file_operations tracing_thresh_fops = {
5206 	.open		= tracing_open_generic,
5207 	.read		= tracing_thresh_read,
5208 	.write		= tracing_thresh_write,
5209 	.llseek		= generic_file_llseek,
5210 };
5211 
5212 static const struct file_operations tracing_max_lat_fops = {
5213 	.open		= tracing_open_generic,
5214 	.read		= tracing_max_lat_read,
5215 	.write		= tracing_max_lat_write,
5216 	.llseek		= generic_file_llseek,
5217 };
5218 
5219 static const struct file_operations set_tracer_fops = {
5220 	.open		= tracing_open_generic,
5221 	.read		= tracing_set_trace_read,
5222 	.write		= tracing_set_trace_write,
5223 	.llseek		= generic_file_llseek,
5224 };
5225 
5226 static const struct file_operations tracing_pipe_fops = {
5227 	.open		= tracing_open_pipe,
5228 	.poll		= tracing_poll_pipe,
5229 	.read		= tracing_read_pipe,
5230 	.splice_read	= tracing_splice_read_pipe,
5231 	.release	= tracing_release_pipe,
5232 	.llseek		= no_llseek,
5233 };
5234 
5235 static const struct file_operations tracing_entries_fops = {
5236 	.open		= tracing_open_generic_tr,
5237 	.read		= tracing_entries_read,
5238 	.write		= tracing_entries_write,
5239 	.llseek		= generic_file_llseek,
5240 	.release	= tracing_release_generic_tr,
5241 };
5242 
5243 static const struct file_operations tracing_total_entries_fops = {
5244 	.open		= tracing_open_generic_tr,
5245 	.read		= tracing_total_entries_read,
5246 	.llseek		= generic_file_llseek,
5247 	.release	= tracing_release_generic_tr,
5248 };
5249 
5250 static const struct file_operations tracing_free_buffer_fops = {
5251 	.open		= tracing_open_generic_tr,
5252 	.write		= tracing_free_buffer_write,
5253 	.release	= tracing_free_buffer_release,
5254 };
5255 
5256 static const struct file_operations tracing_mark_fops = {
5257 	.open		= tracing_open_generic_tr,
5258 	.write		= tracing_mark_write,
5259 	.llseek		= generic_file_llseek,
5260 	.release	= tracing_release_generic_tr,
5261 };
5262 
5263 static const struct file_operations trace_clock_fops = {
5264 	.open		= tracing_clock_open,
5265 	.read		= seq_read,
5266 	.llseek		= seq_lseek,
5267 	.release	= tracing_single_release_tr,
5268 	.write		= tracing_clock_write,
5269 };
5270 
5271 #ifdef CONFIG_TRACER_SNAPSHOT
5272 static const struct file_operations snapshot_fops = {
5273 	.open		= tracing_snapshot_open,
5274 	.read		= seq_read,
5275 	.write		= tracing_snapshot_write,
5276 	.llseek		= tracing_lseek,
5277 	.release	= tracing_snapshot_release,
5278 };
5279 
5280 static const struct file_operations snapshot_raw_fops = {
5281 	.open		= snapshot_raw_open,
5282 	.read		= tracing_buffers_read,
5283 	.release	= tracing_buffers_release,
5284 	.splice_read	= tracing_buffers_splice_read,
5285 	.llseek		= no_llseek,
5286 };
5287 
5288 #endif /* CONFIG_TRACER_SNAPSHOT */
5289 
5290 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5291 {
5292 	struct trace_array *tr = inode->i_private;
5293 	struct ftrace_buffer_info *info;
5294 	int ret;
5295 
5296 	if (tracing_disabled)
5297 		return -ENODEV;
5298 
5299 	if (trace_array_get(tr) < 0)
5300 		return -ENODEV;
5301 
5302 	info = kzalloc(sizeof(*info), GFP_KERNEL);
5303 	if (!info) {
5304 		trace_array_put(tr);
5305 		return -ENOMEM;
5306 	}
5307 
5308 	mutex_lock(&trace_types_lock);
5309 
5310 	info->iter.tr		= tr;
5311 	info->iter.cpu_file	= tracing_get_cpu(inode);
5312 	info->iter.trace	= tr->current_trace;
5313 	info->iter.trace_buffer = &tr->trace_buffer;
5314 	info->spare		= NULL;
5315 	/* Force reading ring buffer for first read */
5316 	info->read		= (unsigned int)-1;
5317 
5318 	filp->private_data = info;
5319 
5320 	tr->current_trace->ref++;
5321 
5322 	mutex_unlock(&trace_types_lock);
5323 
5324 	ret = nonseekable_open(inode, filp);
5325 	if (ret < 0)
5326 		trace_array_put(tr);
5327 
5328 	return ret;
5329 }
5330 
5331 static unsigned int
5332 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5333 {
5334 	struct ftrace_buffer_info *info = filp->private_data;
5335 	struct trace_iterator *iter = &info->iter;
5336 
5337 	return trace_poll(iter, filp, poll_table);
5338 }
5339 
5340 static ssize_t
5341 tracing_buffers_read(struct file *filp, char __user *ubuf,
5342 		     size_t count, loff_t *ppos)
5343 {
5344 	struct ftrace_buffer_info *info = filp->private_data;
5345 	struct trace_iterator *iter = &info->iter;
5346 	ssize_t ret;
5347 	ssize_t size;
5348 
5349 	if (!count)
5350 		return 0;
5351 
5352 #ifdef CONFIG_TRACER_MAX_TRACE
5353 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5354 		return -EBUSY;
5355 #endif
5356 
5357 	if (!info->spare)
5358 		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5359 							  iter->cpu_file);
5360 	if (!info->spare)
5361 		return -ENOMEM;
5362 
5363 	/* Do we have previous read data to read? */
5364 	if (info->read < PAGE_SIZE)
5365 		goto read;
5366 
5367  again:
5368 	trace_access_lock(iter->cpu_file);
5369 	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5370 				    &info->spare,
5371 				    count,
5372 				    iter->cpu_file, 0);
5373 	trace_access_unlock(iter->cpu_file);
5374 
5375 	if (ret < 0) {
5376 		if (trace_empty(iter)) {
5377 			if ((filp->f_flags & O_NONBLOCK))
5378 				return -EAGAIN;
5379 
5380 			ret = wait_on_pipe(iter, false);
5381 			if (ret)
5382 				return ret;
5383 
5384 			goto again;
5385 		}
5386 		return 0;
5387 	}
5388 
5389 	info->read = 0;
5390  read:
5391 	size = PAGE_SIZE - info->read;
5392 	if (size > count)
5393 		size = count;
5394 
5395 	ret = copy_to_user(ubuf, info->spare + info->read, size);
5396 	if (ret == size)
5397 		return -EFAULT;
5398 
5399 	size -= ret;
5400 
5401 	*ppos += size;
5402 	info->read += size;
5403 
5404 	return size;
5405 }
5406 
5407 static int tracing_buffers_release(struct inode *inode, struct file *file)
5408 {
5409 	struct ftrace_buffer_info *info = file->private_data;
5410 	struct trace_iterator *iter = &info->iter;
5411 
5412 	mutex_lock(&trace_types_lock);
5413 
5414 	iter->tr->current_trace->ref--;
5415 
5416 	__trace_array_put(iter->tr);
5417 
5418 	if (info->spare)
5419 		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5420 	kfree(info);
5421 
5422 	mutex_unlock(&trace_types_lock);
5423 
5424 	return 0;
5425 }
5426 
5427 struct buffer_ref {
5428 	struct ring_buffer	*buffer;
5429 	void			*page;
5430 	int			ref;
5431 };
5432 
5433 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5434 				    struct pipe_buffer *buf)
5435 {
5436 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5437 
5438 	if (--ref->ref)
5439 		return;
5440 
5441 	ring_buffer_free_read_page(ref->buffer, ref->page);
5442 	kfree(ref);
5443 	buf->private = 0;
5444 }
5445 
5446 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5447 				struct pipe_buffer *buf)
5448 {
5449 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5450 
5451 	ref->ref++;
5452 }
5453 
5454 /* Pipe buffer operations for a buffer. */
5455 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5456 	.can_merge		= 0,
5457 	.confirm		= generic_pipe_buf_confirm,
5458 	.release		= buffer_pipe_buf_release,
5459 	.steal			= generic_pipe_buf_steal,
5460 	.get			= buffer_pipe_buf_get,
5461 };
5462 
5463 /*
5464  * Callback from splice_to_pipe(), if we need to release some pages
5465  * at the end of the spd in case we errored out while filling the pipe.
5466  */
5467 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5468 {
5469 	struct buffer_ref *ref =
5470 		(struct buffer_ref *)spd->partial[i].private;
5471 
5472 	if (--ref->ref)
5473 		return;
5474 
5475 	ring_buffer_free_read_page(ref->buffer, ref->page);
5476 	kfree(ref);
5477 	spd->partial[i].private = 0;
5478 }
5479 
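/*
 * Splice support for "trace_pipe_raw": whole ring buffer pages are
 * handed to the pipe without copying. Each spliced page is wrapped in
 * a buffer_ref so it is returned to the ring buffer only when the last
 * pipe reference to it is released.
 */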
5480 static ssize_t
5481 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5482 			    struct pipe_inode_info *pipe, size_t len,
5483 			    unsigned int flags)
5484 {
5485 	struct ftrace_buffer_info *info = file->private_data;
5486 	struct trace_iterator *iter = &info->iter;
5487 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
5488 	struct page *pages_def[PIPE_DEF_BUFFERS];
5489 	struct splice_pipe_desc spd = {
5490 		.pages		= pages_def,
5491 		.partial	= partial_def,
5492 		.nr_pages_max	= PIPE_DEF_BUFFERS,
5493 		.flags		= flags,
5494 		.ops		= &buffer_pipe_buf_ops,
5495 		.spd_release	= buffer_spd_release,
5496 	};
5497 	struct buffer_ref *ref;
5498 	int entries, size, i;
5499 	ssize_t ret = 0;
5500 
5501 #ifdef CONFIG_TRACER_MAX_TRACE
5502 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5503 		return -EBUSY;
5504 #endif
5505 
5506 	if (*ppos & (PAGE_SIZE - 1))
5507 		return -EINVAL;
5508 
5509 	if (len & (PAGE_SIZE - 1)) {
5510 		if (len < PAGE_SIZE)
5511 			return -EINVAL;
5512 		len &= PAGE_MASK;
5513 	}
5514 
5515 	if (splice_grow_spd(pipe, &spd))
5516 		return -ENOMEM;
5517 
5518  again:
5519 	trace_access_lock(iter->cpu_file);
5520 	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5521 
5522 	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5523 		struct page *page;
5524 		int r;
5525 
5526 		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5527 		if (!ref) {
5528 			ret = -ENOMEM;
5529 			break;
5530 		}
5531 
5532 		ref->ref = 1;
5533 		ref->buffer = iter->trace_buffer->buffer;
5534 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5535 		if (!ref->page) {
5536 			ret = -ENOMEM;
5537 			kfree(ref);
5538 			break;
5539 		}
5540 
5541 		r = ring_buffer_read_page(ref->buffer, &ref->page,
5542 					  len, iter->cpu_file, 1);
5543 		if (r < 0) {
5544 			ring_buffer_free_read_page(ref->buffer, ref->page);
5545 			kfree(ref);
5546 			break;
5547 		}
5548 
5549 		/*
5550 		 * zero out any leftover data, this is going to
5551 		 * user land.
5552 		 */
5553 		size = ring_buffer_page_len(ref->page);
5554 		if (size < PAGE_SIZE)
5555 			memset(ref->page + size, 0, PAGE_SIZE - size);
5556 
5557 		page = virt_to_page(ref->page);
5558 
5559 		spd.pages[i] = page;
5560 		spd.partial[i].len = PAGE_SIZE;
5561 		spd.partial[i].offset = 0;
5562 		spd.partial[i].private = (unsigned long)ref;
5563 		spd.nr_pages++;
5564 		*ppos += PAGE_SIZE;
5565 
5566 		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5567 	}
5568 
5569 	trace_access_unlock(iter->cpu_file);
5570 	spd.nr_pages = i;
5571 
5572 	/* did we read anything? */
5573 	if (!spd.nr_pages) {
5574 		if (ret)
5575 			return ret;
5576 
5577 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5578 			return -EAGAIN;
5579 
5580 		ret = wait_on_pipe(iter, true);
5581 		if (ret)
5582 			return ret;
5583 
5584 		goto again;
5585 	}
5586 
5587 	ret = splice_to_pipe(pipe, &spd);
5588 	splice_shrink_spd(&spd);
5589 
5590 	return ret;
5591 }
5592 
5593 static const struct file_operations tracing_buffers_fops = {
5594 	.open		= tracing_buffers_open,
5595 	.read		= tracing_buffers_read,
5596 	.poll		= tracing_buffers_poll,
5597 	.release	= tracing_buffers_release,
5598 	.splice_read	= tracing_buffers_splice_read,
5599 	.llseek		= no_llseek,
5600 };
5601 
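/*
 * Per-cpu "stats" file: prints the entry, overrun, commit overrun, byte,
 * dropped and read event counters for one CPU's ring buffer, plus the
 * oldest event and current timestamps (shown as seconds.microseconds
 * when the trace clock counts in nanoseconds).
 */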
5602 static ssize_t
5603 tracing_stats_read(struct file *filp, char __user *ubuf,
5604 		   size_t count, loff_t *ppos)
5605 {
5606 	struct inode *inode = file_inode(filp);
5607 	struct trace_array *tr = inode->i_private;
5608 	struct trace_buffer *trace_buf = &tr->trace_buffer;
5609 	int cpu = tracing_get_cpu(inode);
5610 	struct trace_seq *s;
5611 	unsigned long cnt;
5612 	unsigned long long t;
5613 	unsigned long usec_rem;
5614 
5615 	s = kmalloc(sizeof(*s), GFP_KERNEL);
5616 	if (!s)
5617 		return -ENOMEM;
5618 
5619 	trace_seq_init(s);
5620 
5621 	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5622 	trace_seq_printf(s, "entries: %ld\n", cnt);
5623 
5624 	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5625 	trace_seq_printf(s, "overrun: %ld\n", cnt);
5626 
5627 	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5628 	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5629 
5630 	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5631 	trace_seq_printf(s, "bytes: %ld\n", cnt);
5632 
5633 	if (trace_clocks[tr->clock_id].in_ns) {
5634 		/* local or global for trace_clock */
5635 		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5636 		usec_rem = do_div(t, USEC_PER_SEC);
5637 		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5638 								t, usec_rem);
5639 
5640 		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5641 		usec_rem = do_div(t, USEC_PER_SEC);
5642 		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5643 	} else {
5644 		/* counter or tsc mode for trace_clock */
5645 		trace_seq_printf(s, "oldest event ts: %llu\n",
5646 				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5647 
5648 		trace_seq_printf(s, "now ts: %llu\n",
5649 				ring_buffer_time_stamp(trace_buf->buffer, cpu));
5650 	}
5651 
5652 	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5653 	trace_seq_printf(s, "dropped events: %ld\n", cnt);
5654 
5655 	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5656 	trace_seq_printf(s, "read events: %ld\n", cnt);
5657 
5658 	count = simple_read_from_buffer(ubuf, count, ppos,
5659 					s->buffer, trace_seq_used(s));
5660 
5661 	kfree(s);
5662 
5663 	return count;
5664 }
5665 
5666 static const struct file_operations tracing_stats_fops = {
5667 	.open		= tracing_open_generic_tr,
5668 	.read		= tracing_stats_read,
5669 	.llseek		= generic_file_llseek,
5670 	.release	= tracing_release_generic_tr,
5671 };
5672 
5673 #ifdef CONFIG_DYNAMIC_FTRACE
5674 
5675 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5676 {
5677 	return 0;
5678 }
5679 
5680 static ssize_t
5681 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5682 		  size_t cnt, loff_t *ppos)
5683 {
5684 	static char ftrace_dyn_info_buffer[1024];
5685 	static DEFINE_MUTEX(dyn_info_mutex);
5686 	unsigned long *p = filp->private_data;
5687 	char *buf = ftrace_dyn_info_buffer;
5688 	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5689 	int r;
5690 
5691 	mutex_lock(&dyn_info_mutex);
5692 	r = sprintf(buf, "%ld ", *p);
5693 
5694 	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5695 	buf[r++] = '\n';
5696 
5697 	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5698 
5699 	mutex_unlock(&dyn_info_mutex);
5700 
5701 	return r;
5702 }
5703 
5704 static const struct file_operations tracing_dyn_info_fops = {
5705 	.open		= tracing_open_generic,
5706 	.read		= tracing_read_dyn_info,
5707 	.llseek		= generic_file_llseek,
5708 };
5709 #endif /* CONFIG_DYNAMIC_FTRACE */
5710 
5711 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5712 static void
5713 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5714 {
5715 	tracing_snapshot();
5716 }
5717 
5718 static void
5719 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5720 {
5721 	unsigned long *count = (long *)data;
5722 
5723 	if (!*count)
5724 		return;
5725 
5726 	if (*count != -1)
5727 		(*count)--;
5728 
5729 	tracing_snapshot();
5730 }
5731 
5732 static int
5733 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5734 		      struct ftrace_probe_ops *ops, void *data)
5735 {
5736 	long count = (long)data;
5737 
5738 	seq_printf(m, "%ps:", (void *)ip);
5739 
5740 	seq_puts(m, "snapshot");
5741 
5742 	if (count == -1)
5743 		seq_puts(m, ":unlimited\n");
5744 	else
5745 		seq_printf(m, ":count=%ld\n", count);
5746 
5747 	return 0;
5748 }
5749 
5750 static struct ftrace_probe_ops snapshot_probe_ops = {
5751 	.func			= ftrace_snapshot,
5752 	.print			= ftrace_snapshot_print,
5753 };
5754 
5755 static struct ftrace_probe_ops snapshot_count_probe_ops = {
5756 	.func			= ftrace_count_snapshot,
5757 	.print			= ftrace_snapshot_print,
5758 };
5759 
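/*
 * Implements the "snapshot" command of set_ftrace_filter, e.g.
 * (assuming the usual tracefs/debugfs mount point; vfs_read is only an
 * example, any function listed in available_filter_functions works):
 *
 *   # echo 'vfs_read:snapshot:5' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * takes a snapshot the first five times vfs_read() is hit. Omitting the
 * count makes it unlimited, and a leading '!' removes the probe.
 */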
5760 static int
5761 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5762 			       char *glob, char *cmd, char *param, int enable)
5763 {
5764 	struct ftrace_probe_ops *ops;
5765 	void *count = (void *)-1;
5766 	char *number;
5767 	int ret;
5768 
5769 	/* hash funcs only work with set_ftrace_filter */
5770 	if (!enable)
5771 		return -EINVAL;
5772 
5773 	ops = param ? &snapshot_count_probe_ops :  &snapshot_probe_ops;
5774 
5775 	if (glob[0] == '!') {
5776 		unregister_ftrace_function_probe_func(glob+1, ops);
5777 		return 0;
5778 	}
5779 
5780 	if (!param)
5781 		goto out_reg;
5782 
5783 	number = strsep(&param, ":");
5784 
5785 	if (!strlen(number))
5786 		goto out_reg;
5787 
5788 	/*
5789 	 * We use the callback data field (which is a pointer)
5790 	 * as our counter.
5791 	 */
5792 	ret = kstrtoul(number, 0, (unsigned long *)&count);
5793 	if (ret)
5794 		return ret;
5795 
5796  out_reg:
5797 	ret = register_ftrace_function_probe(glob, ops, count);
5798 
5799 	if (ret >= 0)
5800 		alloc_snapshot(&global_trace);
5801 
5802 	return ret < 0 ? ret : 0;
5803 }
5804 
5805 static struct ftrace_func_command ftrace_snapshot_cmd = {
5806 	.name			= "snapshot",
5807 	.func			= ftrace_trace_snapshot_callback,
5808 };
5809 
5810 static __init int register_snapshot_cmd(void)
5811 {
5812 	return register_ftrace_command(&ftrace_snapshot_cmd);
5813 }
5814 #else
5815 static inline __init int register_snapshot_cmd(void) { return 0; }
5816 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
5817 
5818 static struct dentry *tracing_get_dentry(struct trace_array *tr)
5819 {
5820 	return tr->dir;
5821 }
5822 
5823 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5824 {
5825 	struct dentry *d_tracer;
5826 
5827 	if (tr->percpu_dir)
5828 		return tr->percpu_dir;
5829 
5830 	d_tracer = tracing_get_dentry(tr);
5831 	if (IS_ERR(d_tracer))
5832 		return NULL;
5833 
5834 	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
5835 
5836 	WARN_ONCE(!tr->percpu_dir,
5837 		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
5838 
5839 	return tr->percpu_dir;
5840 }
5841 
5842 static struct dentry *
5843 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5844 		      void *data, long cpu, const struct file_operations *fops)
5845 {
5846 	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5847 
5848 	if (ret) /* See tracing_get_cpu() */
5849 		ret->d_inode->i_cdev = (void *)(cpu + 1);
5850 	return ret;
5851 }
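
/*
 * Editor's note: the "+ 1" above lets tracing_get_cpu() tell a per-cpu
 * file (i_cdev != NULL) apart from a buffer-wide one.  The decode side
 * is roughly:
 *
 *	if (inode->i_cdev)
 *		return (long)inode->i_cdev - 1;
 *	return RING_BUFFER_ALL_CPUS;
 *
 * so that cpu 0 does not collide with the NULL "no cpu" case.
 */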
5852 
5853 static void
5854 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5855 {
5856 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5857 	struct dentry *d_cpu;
5858 	char cpu_dir[30]; /* 30 characters should be more than enough */
5859 
5860 	if (!d_percpu)
5861 		return;
5862 
5863 	snprintf(cpu_dir, 30, "cpu%ld", cpu);
5864 	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5865 	if (!d_cpu) {
5866 		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5867 		return;
5868 	}
5869 
5870 	/* per cpu trace_pipe */
5871 	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
5872 				tr, cpu, &tracing_pipe_fops);
5873 
5874 	/* per cpu trace */
5875 	trace_create_cpu_file("trace", 0644, d_cpu,
5876 				tr, cpu, &tracing_fops);
5877 
5878 	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
5879 				tr, cpu, &tracing_buffers_fops);
5880 
5881 	trace_create_cpu_file("stats", 0444, d_cpu,
5882 				tr, cpu, &tracing_stats_fops);
5883 
5884 	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
5885 				tr, cpu, &tracing_entries_fops);
5886 
5887 #ifdef CONFIG_TRACER_SNAPSHOT
5888 	trace_create_cpu_file("snapshot", 0644, d_cpu,
5889 				tr, cpu, &snapshot_fops);
5890 
5891 	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
5892 				tr, cpu, &snapshot_raw_fops);
5893 #endif
5894 }
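
/*
 * Editor's note: for each traced CPU the function above produces a tree
 * such as (assuming the usual /sys/kernel/debug mount point):
 *
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/trace
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/stats
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/buffer_size_kb
 *
 * mirroring the top-level files, but restricted to that CPU's buffer.
 */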
5895 
5896 #ifdef CONFIG_FTRACE_SELFTEST
5897 /* Let selftest have access to static functions in this file */
5898 #include "trace_selftest.c"
5899 #endif
5900 
5901 struct trace_option_dentry {
5902 	struct tracer_opt		*opt;
5903 	struct tracer_flags		*flags;
5904 	struct trace_array		*tr;
5905 	struct dentry			*entry;
5906 };
5907 
5908 static ssize_t
5909 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5910 			loff_t *ppos)
5911 {
5912 	struct trace_option_dentry *topt = filp->private_data;
5913 	char *buf;
5914 
5915 	if (topt->flags->val & topt->opt->bit)
5916 		buf = "1\n";
5917 	else
5918 		buf = "0\n";
5919 
5920 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5921 }
5922 
5923 static ssize_t
5924 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5925 			 loff_t *ppos)
5926 {
5927 	struct trace_option_dentry *topt = filp->private_data;
5928 	unsigned long val;
5929 	int ret;
5930 
5931 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5932 	if (ret)
5933 		return ret;
5934 
5935 	if (val != 0 && val != 1)
5936 		return -EINVAL;
5937 
5938 	if (!!(topt->flags->val & topt->opt->bit) != val) {
5939 		mutex_lock(&trace_types_lock);
5940 		ret = __set_tracer_option(topt->tr, topt->flags,
5941 					  topt->opt, !val);
5942 		mutex_unlock(&trace_types_lock);
5943 		if (ret)
5944 			return ret;
5945 	}
5946 
5947 	*ppos += cnt;
5948 
5949 	return cnt;
5950 }
5951 
5952 
5953 static const struct file_operations trace_options_fops = {
5954 	.open = tracing_open_generic,
5955 	.read = trace_options_read,
5956 	.write = trace_options_write,
5957 	.llseek	= generic_file_llseek,
5958 };
5959 
5960 static ssize_t
5961 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5962 			loff_t *ppos)
5963 {
5964 	long index = (long)filp->private_data;
5965 	char *buf;
5966 
5967 	if (trace_flags & (1 << index))
5968 		buf = "1\n";
5969 	else
5970 		buf = "0\n";
5971 
5972 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5973 }
5974 
5975 static ssize_t
5976 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5977 			 loff_t *ppos)
5978 {
5979 	struct trace_array *tr = &global_trace;
5980 	long index = (long)filp->private_data;
5981 	unsigned long val;
5982 	int ret;
5983 
5984 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5985 	if (ret)
5986 		return ret;
5987 
5988 	if (val != 0 && val != 1)
5989 		return -EINVAL;
5990 
5991 	mutex_lock(&trace_types_lock);
5992 	ret = set_tracer_flag(tr, 1 << index, val);
5993 	mutex_unlock(&trace_types_lock);
5994 
5995 	if (ret < 0)
5996 		return ret;
5997 
5998 	*ppos += cnt;
5999 
6000 	return cnt;
6001 }
6002 
6003 static const struct file_operations trace_options_core_fops = {
6004 	.open = tracing_open_generic,
6005 	.read = trace_options_core_read,
6006 	.write = trace_options_core_write,
6007 	.llseek = generic_file_llseek,
6008 };
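
/*
 * Editor's note -- a usage sketch for the two sets of option files wired
 * up by the fops above (paths assume the default debugfs mount):
 *
 *	cat /sys/kernel/debug/tracing/options/sym-offset	read a core flag
 *	echo 1 > /sys/kernel/debug/tracing/options/sym-offset	set it
 *	echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 *
 * The core flags (trace_options[]) are created once at boot for the
 * top-level tracing directory; tracer-specific options such as
 * func_stack_trace only appear while the tracer that owns them is the
 * current tracer.
 */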
6009 
6010 struct dentry *trace_create_file(const char *name,
6011 				 umode_t mode,
6012 				 struct dentry *parent,
6013 				 void *data,
6014 				 const struct file_operations *fops)
6015 {
6016 	struct dentry *ret;
6017 
6018 	ret = debugfs_create_file(name, mode, parent, data, fops);
6019 	if (!ret)
6020 		pr_warning("Could not create debugfs '%s' entry\n", name);
6021 
6022 	return ret;
6023 }
6024 
6025 
6026 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6027 {
6028 	struct dentry *d_tracer;
6029 
6030 	if (tr->options)
6031 		return tr->options;
6032 
6033 	d_tracer = tracing_get_dentry(tr);
6034 	if (IS_ERR(d_tracer))
6035 		return NULL;
6036 
6037 	tr->options = debugfs_create_dir("options", d_tracer);
6038 	if (!tr->options) {
6039 		pr_warning("Could not create debugfs directory 'options'\n");
6040 		return NULL;
6041 	}
6042 
6043 	return tr->options;
6044 }
6045 
6046 static void
6047 create_trace_option_file(struct trace_array *tr,
6048 			 struct trace_option_dentry *topt,
6049 			 struct tracer_flags *flags,
6050 			 struct tracer_opt *opt)
6051 {
6052 	struct dentry *t_options;
6053 
6054 	t_options = trace_options_init_dentry(tr);
6055 	if (!t_options)
6056 		return;
6057 
6058 	topt->flags = flags;
6059 	topt->opt = opt;
6060 	topt->tr = tr;
6061 
6062 	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6063 				    &trace_options_fops);
6065 }
6066 
6067 static struct trace_option_dentry *
6068 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6069 {
6070 	struct trace_option_dentry *topts;
6071 	struct tracer_flags *flags;
6072 	struct tracer_opt *opts;
6073 	int cnt;
6074 
6075 	if (!tracer)
6076 		return NULL;
6077 
6078 	flags = tracer->flags;
6079 
6080 	if (!flags || !flags->opts)
6081 		return NULL;
6082 
6083 	opts = flags->opts;
6084 
6085 	for (cnt = 0; opts[cnt].name; cnt++)
6086 		;
6087 
6088 	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6089 	if (!topts)
6090 		return NULL;
6091 
6092 	for (cnt = 0; opts[cnt].name; cnt++)
6093 		create_trace_option_file(tr, &topts[cnt], flags,
6094 					 &opts[cnt]);
6095 
6096 	return topts;
6097 }
6098 
6099 static void
6100 destroy_trace_option_files(struct trace_option_dentry *topts)
6101 {
6102 	int cnt;
6103 
6104 	if (!topts)
6105 		return;
6106 
6107 	for (cnt = 0; topts[cnt].opt; cnt++)
6108 		debugfs_remove(topts[cnt].entry);
6109 
6110 	kfree(topts);
6111 }
6112 
6113 static struct dentry *
6114 create_trace_option_core_file(struct trace_array *tr,
6115 			      const char *option, long index)
6116 {
6117 	struct dentry *t_options;
6118 
6119 	t_options = trace_options_init_dentry(tr);
6120 	if (!t_options)
6121 		return NULL;
6122 
6123 	return trace_create_file(option, 0644, t_options, (void *)index,
6124 				    &trace_options_core_fops);
6125 }
6126 
6127 static __init void create_trace_options_dir(struct trace_array *tr)
6128 {
6129 	struct dentry *t_options;
6130 	int i;
6131 
6132 	t_options = trace_options_init_dentry(tr);
6133 	if (!t_options)
6134 		return;
6135 
6136 	for (i = 0; trace_options[i]; i++)
6137 		create_trace_option_core_file(tr, trace_options[i], i);
6138 }
6139 
6140 static ssize_t
6141 rb_simple_read(struct file *filp, char __user *ubuf,
6142 	       size_t cnt, loff_t *ppos)
6143 {
6144 	struct trace_array *tr = filp->private_data;
6145 	char buf[64];
6146 	int r;
6147 
6148 	r = tracer_tracing_is_on(tr);
6149 	r = sprintf(buf, "%d\n", r);
6150 
6151 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6152 }
6153 
6154 static ssize_t
6155 rb_simple_write(struct file *filp, const char __user *ubuf,
6156 		size_t cnt, loff_t *ppos)
6157 {
6158 	struct trace_array *tr = filp->private_data;
6159 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
6160 	unsigned long val;
6161 	int ret;
6162 
6163 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6164 	if (ret)
6165 		return ret;
6166 
6167 	if (buffer) {
6168 		mutex_lock(&trace_types_lock);
6169 		if (val) {
6170 			tracer_tracing_on(tr);
6171 			if (tr->current_trace->start)
6172 				tr->current_trace->start(tr);
6173 		} else {
6174 			tracer_tracing_off(tr);
6175 			if (tr->current_trace->stop)
6176 				tr->current_trace->stop(tr);
6177 		}
6178 		mutex_unlock(&trace_types_lock);
6179 	}
6180 
6181 	(*ppos)++;
6182 
6183 	return cnt;
6184 }
6185 
6186 static const struct file_operations rb_simple_fops = {
6187 	.open		= tracing_open_generic_tr,
6188 	.read		= rb_simple_read,
6189 	.write		= rb_simple_write,
6190 	.release	= tracing_release_generic_tr,
6191 	.llseek		= default_llseek,
6192 };
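
/*
 * Editor's note: rb_simple_fops backs the per-instance "tracing_on" file.
 * A minimal usage sketch, assuming the default debugfs mount:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on	stop recording
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on	resume recording
 *	cat /sys/kernel/debug/tracing/tracing_on	read the current state
 *
 * Writing here only gates the ring buffer (and calls the current
 * tracer's ->start()/->stop() hooks); the tracer itself stays loaded.
 */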
6193 
6194 struct dentry *trace_instance_dir;
6195 
6196 static void
6197 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6198 
6199 static int
6200 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6201 {
6202 	enum ring_buffer_flags rb_flags;
6203 
6204 	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6205 
6206 	buf->tr = tr;
6207 
6208 	buf->buffer = ring_buffer_alloc(size, rb_flags);
6209 	if (!buf->buffer)
6210 		return -ENOMEM;
6211 
6212 	buf->data = alloc_percpu(struct trace_array_cpu);
6213 	if (!buf->data) {
6214 		ring_buffer_free(buf->buffer);
6215 		return -ENOMEM;
6216 	}
6217 
6218 	/* Allocate the first page for all buffers */
6219 	set_buffer_entries(&tr->trace_buffer,
6220 			   ring_buffer_size(tr->trace_buffer.buffer, 0));
6221 
6222 	return 0;
6223 }
6224 
6225 static int allocate_trace_buffers(struct trace_array *tr, int size)
6226 {
6227 	int ret;
6228 
6229 	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6230 	if (ret)
6231 		return ret;
6232 
6233 #ifdef CONFIG_TRACER_MAX_TRACE
6234 	ret = allocate_trace_buffer(tr, &tr->max_buffer,
6235 				    allocate_snapshot ? size : 1);
6236 	if (WARN_ON(ret)) {
6237 		ring_buffer_free(tr->trace_buffer.buffer);
6238 		free_percpu(tr->trace_buffer.data);
		tr->trace_buffer.buffer = NULL;
		tr->trace_buffer.data = NULL;
6239 		return -ENOMEM;
6240 	}
6241 	tr->allocated_snapshot = allocate_snapshot;
6242 
6243 	/*
6244 	 * Only the top level trace array gets its snapshot allocated
6245 	 * from the kernel command line.
6246 	 */
6247 	allocate_snapshot = false;
6248 #endif
6249 	return 0;
6250 }
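
/*
 * Editor's note: with CONFIG_TRACER_MAX_TRACE, the max/snapshot buffer
 * above starts out at its minimum size unless "alloc_snapshot" was given
 * on the kernel command line; it is resized on demand later.  A kernel
 * user that wants a snapshot without the boot parameter would typically
 * do something like the (hypothetical) helper below.
 */
static void __maybe_unused example_take_snapshot(void)
{
	/* allocates the snapshot buffer if needed, then takes a snapshot */
	tracing_snapshot_alloc();
}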
6251 
6252 static void free_trace_buffer(struct trace_buffer *buf)
6253 {
6254 	if (buf->buffer) {
6255 		ring_buffer_free(buf->buffer);
6256 		buf->buffer = NULL;
6257 		free_percpu(buf->data);
6258 		buf->data = NULL;
6259 	}
6260 }
6261 
6262 static void free_trace_buffers(struct trace_array *tr)
6263 {
6264 	if (!tr)
6265 		return;
6266 
6267 	free_trace_buffer(&tr->trace_buffer);
6268 
6269 #ifdef CONFIG_TRACER_MAX_TRACE
6270 	free_trace_buffer(&tr->max_buffer);
6271 #endif
6272 }
6273 
6274 static int new_instance_create(const char *name)
6275 {
6276 	struct trace_array *tr;
6277 	int ret;
6278 
6279 	mutex_lock(&trace_types_lock);
6280 
6281 	ret = -EEXIST;
6282 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6283 		if (tr->name && strcmp(tr->name, name) == 0)
6284 			goto out_unlock;
6285 	}
6286 
6287 	ret = -ENOMEM;
6288 	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6289 	if (!tr)
6290 		goto out_unlock;
6291 
6292 	tr->name = kstrdup(name, GFP_KERNEL);
6293 	if (!tr->name)
6294 		goto out_free_tr;
6295 
6296 	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6297 		goto out_free_tr;
6298 
6299 	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6300 
6301 	raw_spin_lock_init(&tr->start_lock);
6302 
6303 	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6304 
6305 	tr->current_trace = &nop_trace;
6306 
6307 	INIT_LIST_HEAD(&tr->systems);
6308 	INIT_LIST_HEAD(&tr->events);
6309 
6310 	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6311 		goto out_free_tr;
6312 
6313 	tr->dir = debugfs_create_dir(name, trace_instance_dir);
6314 	if (!tr->dir)
6315 		goto out_free_tr;
6316 
6317 	ret = event_trace_add_tracer(tr->dir, tr);
6318 	if (ret) {
6319 		debugfs_remove_recursive(tr->dir);
6320 		goto out_free_tr;
6321 	}
6322 
6323 	init_tracer_debugfs(tr, tr->dir);
6324 
6325 	list_add(&tr->list, &ftrace_trace_arrays);
6326 
6327 	mutex_unlock(&trace_types_lock);
6328 
6329 	return 0;
6330 
6331  out_free_tr:
6332 	free_trace_buffers(tr);
6333 	free_cpumask_var(tr->tracing_cpumask);
6334 	kfree(tr->name);
6335 	kfree(tr);
6336 
6337  out_unlock:
6338 	mutex_unlock(&trace_types_lock);
6339 
6340 	return ret;
6341 
6342 }
6343 
6344 static int instance_delete(const char *name)
6345 {
6346 	struct trace_array *tr;
6347 	int found = 0;
6348 	int ret;
6349 
6350 	mutex_lock(&trace_types_lock);
6351 
6352 	ret = -ENODEV;
6353 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6354 		if (tr->name && strcmp(tr->name, name) == 0) {
6355 			found = 1;
6356 			break;
6357 		}
6358 	}
6359 	if (!found)
6360 		goto out_unlock;
6361 
6362 	ret = -EBUSY;
6363 	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
6364 		goto out_unlock;
6365 
6366 	list_del(&tr->list);
6367 
6368 	tracing_set_nop(tr);
6369 	event_trace_del_tracer(tr);
6370 	ftrace_destroy_function_files(tr);
6371 	debugfs_remove_recursive(tr->dir);
6372 	free_trace_buffers(tr);
6373 
6374 	kfree(tr->name);
6375 	kfree(tr);
6376 
6377 	ret = 0;
6378 
6379  out_unlock:
6380 	mutex_unlock(&trace_types_lock);
6381 
6382 	return ret;
6383 }
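
/*
 * Editor's note -- usage sketch for the instance handling implemented by
 * new_instance_create() and instance_delete() (default debugfs mount):
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo	create a new trace array
 *	echo function > /sys/kernel/debug/tracing/instances/foo/current_tracer
 *	rmdir /sys/kernel/debug/tracing/instances/foo	tear it down again
 *
 * "foo" is an arbitrary name; the rmdir fails with -EBUSY while the
 * instance is still referenced.
 */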
6384 
6385 static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
6386 {
6387 	struct dentry *parent;
6388 	int ret;
6389 
6390 	/* Paranoid: Make sure the parent is the "instances" directory */
6391 	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
6392 	if (WARN_ON_ONCE(parent != trace_instance_dir))
6393 		return -ENOENT;
6394 
6395 	/*
6396 	 * The inode mutex is locked, but debugfs_create_dir() will also
6397 	 * take the mutex. As the instances directory can not be destroyed
6398 	 * or changed in any other way, it is safe to unlock it, and
6399 	 * let the dentry try. If two users try to make the same dir at
6400 	 * the same time, then the new_instance_create() will determine the
6401 	 * winner.
6402 	 */
6403 	mutex_unlock(&inode->i_mutex);
6404 
6405 	ret = new_instance_create(dentry->d_iname);
6406 
6407 	mutex_lock(&inode->i_mutex);
6408 
6409 	return ret;
6410 }
6411 
6412 static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6413 {
6414 	struct dentry *parent;
6415 	int ret;
6416 
6417 	/* Paranoid: Make sure the parent is the "instances" directory */
6418 	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
6419 	if (WARN_ON_ONCE(parent != trace_instance_dir))
6420 		return -ENOENT;
6421 
6422 	/* The caller did a dget() on dentry */
6423 	mutex_unlock(&dentry->d_inode->i_mutex);
6424 
6425 	/*
6426 	 * The inode mutex is locked, but debugfs_remove_recursive() will
6427 	 * also take the mutex. As the instances directory can not be
6428 	 * destroyed or changed in any other way, it is safe to unlock it,
6429 	 * and let the dentry try. If two users try to remove the same dir
6430 	 * at the same time, then instance_delete() will determine the
6431 	 * winner.
6432 	 */
6433 	mutex_unlock(&inode->i_mutex);
6434 
6435 	ret = instance_delete(dentry->d_iname);
6436 
6437 	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6438 	mutex_lock(&dentry->d_inode->i_mutex);
6439 
6440 	return ret;
6441 }
6442 
6443 static const struct inode_operations instance_dir_inode_operations = {
6444 	.lookup		= simple_lookup,
6445 	.mkdir		= instance_mkdir,
6446 	.rmdir		= instance_rmdir,
6447 };
6448 
6449 static __init void create_trace_instances(struct dentry *d_tracer)
6450 {
6451 	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6452 	if (WARN_ON(!trace_instance_dir))
6453 		return;
6454 
6455 	/* Hijack the dir inode operations, to allow mkdir */
6456 	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6457 }
6458 
6459 static void
6460 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6461 {
6462 	int cpu;
6463 
6464 	trace_create_file("available_tracers", 0444, d_tracer,
6465 			tr, &show_traces_fops);
6466 
6467 	trace_create_file("current_tracer", 0644, d_tracer,
6468 			tr, &set_tracer_fops);
6469 
6470 	trace_create_file("tracing_cpumask", 0644, d_tracer,
6471 			  tr, &tracing_cpumask_fops);
6472 
6473 	trace_create_file("trace_options", 0644, d_tracer,
6474 			  tr, &tracing_iter_fops);
6475 
6476 	trace_create_file("trace", 0644, d_tracer,
6477 			  tr, &tracing_fops);
6478 
6479 	trace_create_file("trace_pipe", 0444, d_tracer,
6480 			  tr, &tracing_pipe_fops);
6481 
6482 	trace_create_file("buffer_size_kb", 0644, d_tracer,
6483 			  tr, &tracing_entries_fops);
6484 
6485 	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6486 			  tr, &tracing_total_entries_fops);
6487 
6488 	trace_create_file("free_buffer", 0200, d_tracer,
6489 			  tr, &tracing_free_buffer_fops);
6490 
6491 	trace_create_file("trace_marker", 0220, d_tracer,
6492 			  tr, &tracing_mark_fops);
6493 
6494 	trace_create_file("trace_clock", 0644, d_tracer, tr,
6495 			  &trace_clock_fops);
6496 
6497 	trace_create_file("tracing_on", 0644, d_tracer,
6498 			  tr, &rb_simple_fops);
6499 
6500 #ifdef CONFIG_TRACER_MAX_TRACE
6501 	trace_create_file("tracing_max_latency", 0644, d_tracer,
6502 			&tr->max_latency, &tracing_max_lat_fops);
6503 #endif
6504 
6505 	if (ftrace_create_function_files(tr, d_tracer))
6506 		WARN(1, "Could not allocate function filter files");
6507 
6508 #ifdef CONFIG_TRACER_SNAPSHOT
6509 	trace_create_file("snapshot", 0644, d_tracer,
6510 			  tr, &snapshot_fops);
6511 #endif
6512 
6513 	for_each_tracing_cpu(cpu)
6514 		tracing_init_debugfs_percpu(tr, cpu);
6516 }
6517 
6518 /**
6519  * tracing_init_dentry - initialize top level trace array
6520  *
6521  * This is called when creating files or directories in the tracing
6522  * directory. It is called via fs_initcall() by any of the boot up code
6523  * and expects to return the dentry of the top level tracing directory.
6524  */
6525 struct dentry *tracing_init_dentry(void)
6526 {
6527 	struct trace_array *tr = &global_trace;
6528 
6529 	if (tr->dir)
6530 		return tr->dir;
6531 
6532 	if (WARN_ON(!debugfs_initialized()))
6533 		return ERR_PTR(-ENODEV);
6534 
6535 	tr->dir = debugfs_create_dir("tracing", NULL);
6536 
6537 	if (!tr->dir) {
6538 		pr_warn_once("Could not create debugfs directory 'tracing'\n");
6539 		return ERR_PTR(-ENOMEM);
6540 	}
6541 
6542 	return tr->dir;
6543 }
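
/*
 * Editor's note: tracing_init_dentry() only creates the "tracing"
 * directory inside an already-initialized debugfs.  If nothing has
 * mounted debugfs yet, the usual userspace setup is simply:
 *
 *	mount -t debugfs nodev /sys/kernel/debug
 *	ls /sys/kernel/debug/tracing
 */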
6544 
6545 static __init int tracer_init_debugfs(void)
6546 {
6547 	struct dentry *d_tracer;
6548 
6549 	trace_access_lock_init();
6550 
6551 	d_tracer = tracing_init_dentry();
6552 	if (IS_ERR(d_tracer))
6553 		return 0;
6554 
6555 	init_tracer_debugfs(&global_trace, d_tracer);
6556 
6557 	trace_create_file("tracing_thresh", 0644, d_tracer,
6558 			&global_trace, &tracing_thresh_fops);
6559 
6560 	trace_create_file("README", 0444, d_tracer,
6561 			NULL, &tracing_readme_fops);
6562 
6563 	trace_create_file("saved_cmdlines", 0444, d_tracer,
6564 			NULL, &tracing_saved_cmdlines_fops);
6565 
6566 	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6567 			  NULL, &tracing_saved_cmdlines_size_fops);
6568 
6569 #ifdef CONFIG_DYNAMIC_FTRACE
6570 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6571 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6572 #endif
6573 
6574 	create_trace_instances(d_tracer);
6575 
6576 	create_trace_options_dir(&global_trace);
6577 
6578 	return 0;
6579 }
6580 
6581 static int trace_panic_handler(struct notifier_block *this,
6582 			       unsigned long event, void *unused)
6583 {
6584 	if (ftrace_dump_on_oops)
6585 		ftrace_dump(ftrace_dump_on_oops);
6586 	return NOTIFY_OK;
6587 }
6588 
6589 static struct notifier_block trace_panic_notifier = {
6590 	.notifier_call  = trace_panic_handler,
6591 	.next           = NULL,
6592 	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
6593 };
6594 
6595 static int trace_die_handler(struct notifier_block *self,
6596 			     unsigned long val,
6597 			     void *data)
6598 {
6599 	switch (val) {
6600 	case DIE_OOPS:
6601 		if (ftrace_dump_on_oops)
6602 			ftrace_dump(ftrace_dump_on_oops);
6603 		break;
6604 	default:
6605 		break;
6606 	}
6607 	return NOTIFY_OK;
6608 }
6609 
6610 static struct notifier_block trace_die_notifier = {
6611 	.notifier_call = trace_die_handler,
6612 	.priority = 200
6613 };
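
/*
 * Editor's note: both notifiers above key off ftrace_dump_on_oops, which
 * can be enabled at boot time or at run time:
 *
 *	ftrace_dump_on_oops		kernel command line, dump all CPUs
 *	ftrace_dump_on_oops=orig_cpu	dump only the CPU that oopsed
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops	sysctl at run time
 */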
6614 
6615 /*
6616  * printk() can handle at most 1024 bytes; we really don't need it that
6617  * big here, as nothing should be printing 1000 characters anyway.
6618  */
6619 #define TRACE_MAX_PRINT		1000
6620 
6621 /*
6622  * Define here KERN_TRACE so that we have one place to modify
6623  * it if we decide to change what log level the ftrace dump
6624  * should be at.
6625  */
6626 #define KERN_TRACE		KERN_EMERG
6627 
6628 void
6629 trace_printk_seq(struct trace_seq *s)
6630 {
6631 	/* Probably should print a warning here. */
6632 	if (s->seq.len >= TRACE_MAX_PRINT)
6633 		s->seq.len = TRACE_MAX_PRINT;
6634 
6635 	/*
6636 	 * More paranoid code. Although the buffer size is set to
6637 	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
6638 	 * an extra layer of protection.
6639 	 */
6640 	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
6641 		s->seq.len = s->seq.size - 1;
6642 
6643 	/* should be zero ended, but we are paranoid. */
6644 	s->buffer[s->seq.len] = 0;
6645 
6646 	printk(KERN_TRACE "%s", s->buffer);
6647 
6648 	trace_seq_init(s);
6649 }
6650 
6651 void trace_init_global_iter(struct trace_iterator *iter)
6652 {
6653 	iter->tr = &global_trace;
6654 	iter->trace = iter->tr->current_trace;
6655 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
6656 	iter->trace_buffer = &global_trace.trace_buffer;
6657 
6658 	if (iter->trace && iter->trace->open)
6659 		iter->trace->open(iter);
6660 
6661 	/* Annotate start of buffers if we had overruns */
6662 	if (ring_buffer_overruns(iter->trace_buffer->buffer))
6663 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
6664 
6665 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
6666 	if (trace_clocks[iter->tr->clock_id].in_ns)
6667 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6668 }
6669 
6670 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
6671 {
6672 	/* use static because iter can be a bit big for the stack */
6673 	static struct trace_iterator iter;
6674 	static atomic_t dump_running;
6675 	unsigned int old_userobj;
6676 	unsigned long flags;
6677 	int cnt = 0, cpu;
6678 
6679 	/* Only allow one dump user at a time. */
6680 	if (atomic_inc_return(&dump_running) != 1) {
6681 		atomic_dec(&dump_running);
6682 		return;
6683 	}
6684 
6685 	/*
6686 	 * Always turn off tracing when we dump.
6687 	 * We don't need to show trace output of what happens
6688 	 * between multiple crashes.
6689 	 *
6690 	 * If the user does a sysrq-z, then they can re-enable
6691 	 * tracing with echo 1 > tracing_on.
6692 	 */
6693 	tracing_off();
6694 
6695 	local_irq_save(flags);
6696 
6697 	/* Simulate the iterator */
6698 	trace_init_global_iter(&iter);
6699 
6700 	for_each_tracing_cpu(cpu) {
6701 		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
6702 	}
6703 
6704 	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6705 
6706 	/* don't look at user memory in panic mode */
6707 	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6708 
6709 	switch (oops_dump_mode) {
6710 	case DUMP_ALL:
6711 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
6712 		break;
6713 	case DUMP_ORIG:
6714 		iter.cpu_file = raw_smp_processor_id();
6715 		break;
6716 	case DUMP_NONE:
6717 		goto out_enable;
6718 	default:
6719 		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
6720 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
6721 	}
6722 
6723 	printk(KERN_TRACE "Dumping ftrace buffer:\n");
6724 
6725 	/* Did function tracer already get disabled? */
6726 	if (ftrace_is_dead()) {
6727 		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6728 		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
6729 	}
6730 
6731 	/*
6732 	 * We need to stop all tracing on all CPUS to read
6733 	 * the next buffer. This is a bit expensive, but is
6734 	 * not done often. We print all that we can read,
6735 	 * and then release the locks again.
6736 	 */
6737 
6738 	while (!trace_empty(&iter)) {
6739 
6740 		if (!cnt)
6741 			printk(KERN_TRACE "---------------------------------\n");
6742 
6743 		cnt++;
6744 
6745 		/* reset all but tr, trace, and overruns */
6746 		memset(&iter.seq, 0,
6747 		       sizeof(struct trace_iterator) -
6748 		       offsetof(struct trace_iterator, seq));
6749 		iter.iter_flags |= TRACE_FILE_LAT_FMT;
6750 		iter.pos = -1;
6751 
6752 		if (trace_find_next_entry_inc(&iter) != NULL) {
6753 			int ret;
6754 
6755 			ret = print_trace_line(&iter);
6756 			if (ret != TRACE_TYPE_NO_CONSUME)
6757 				trace_consume(&iter);
6758 		}
6759 		touch_nmi_watchdog();
6760 
6761 		trace_printk_seq(&iter.seq);
6762 	}
6763 
6764 	if (!cnt)
6765 		printk(KERN_TRACE "   (ftrace buffer empty)\n");
6766 	else
6767 		printk(KERN_TRACE "---------------------------------\n");
6768 
6769  out_enable:
6770 	trace_flags |= old_userobj;
6771 
6772 	for_each_tracing_cpu(cpu) {
6773 		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
6774 	}
6775 	atomic_dec(&dump_running);
6776 	local_irq_restore(flags);
6777 }
6778 EXPORT_SYMBOL_GPL(ftrace_dump);
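
/*
 * Editor's sketch: ftrace_dump() is exported, so code chasing a
 * hard-to-reproduce condition can spill the trace buffers to the console
 * itself.  The helper below is hypothetical and only illustrates the call.
 */
static void __maybe_unused example_dump_on_bad_state(bool bad)
{
	if (bad) {
		/* dump every CPU's buffer; DUMP_ORIG would limit it to this CPU */
		ftrace_dump(DUMP_ALL);
	}
}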
6779 
6780 __init static int tracer_alloc_buffers(void)
6781 {
6782 	int ring_buf_size;
6783 	int ret = -ENOMEM;
6784 
6785 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6786 		goto out;
6787 
6788 	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
6789 		goto out_free_buffer_mask;
6790 
6791 	/* Only allocate trace_printk buffers if a trace_printk exists */
6792 	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
6793 		/* Must be called before global_trace.buffer is allocated */
6794 		trace_printk_init_buffers();
6795 
6796 	/* To save memory, keep the ring buffer size to its minimum */
6797 	if (ring_buffer_expanded)
6798 		ring_buf_size = trace_buf_size;
6799 	else
6800 		ring_buf_size = 1;
6801 
6802 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
6803 	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
6804 
6805 	raw_spin_lock_init(&global_trace.start_lock);
6806 
6807 	/* Used for event triggers */
6808 	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
6809 	if (!temp_buffer)
6810 		goto out_free_cpumask;
6811 
6812 	if (trace_create_savedcmd() < 0)
6813 		goto out_free_temp_buffer;
6814 
6815 	/* TODO: make the number of buffers hot pluggable with CPUS */
6816 	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6817 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6818 		WARN_ON(1);
6819 		goto out_free_savedcmd;
6820 	}
6821 
6822 	if (global_trace.buffer_disabled)
6823 		tracing_off();
6824 
6825 	if (trace_boot_clock) {
6826 		ret = tracing_set_clock(&global_trace, trace_boot_clock);
6827 		if (ret < 0)
6828 			pr_warning("Trace clock %s not defined, going back to default\n",
6829 				   trace_boot_clock);
6830 	}
6831 
6832 	/*
6833 	 * register_tracer() might reference current_trace, so it
6834 	 * needs to be set before we register anything. This is
6835 	 * just a bootstrap of current_trace anyway.
6836 	 */
6837 	global_trace.current_trace = &nop_trace;
6838 
6839 	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6840 
6841 	ftrace_init_global_array_ops(&global_trace);
6842 
6843 	register_tracer(&nop_trace);
6844 
6845 	/* All seems OK, enable tracing */
6846 	tracing_disabled = 0;
6847 
6848 	atomic_notifier_chain_register(&panic_notifier_list,
6849 				       &trace_panic_notifier);
6850 
6851 	register_die_notifier(&trace_die_notifier);
6852 
6853 	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6854 
6855 	INIT_LIST_HEAD(&global_trace.systems);
6856 	INIT_LIST_HEAD(&global_trace.events);
6857 	list_add(&global_trace.list, &ftrace_trace_arrays);
6858 
6859 	while (trace_boot_options) {
6860 		char *option;
6861 
6862 		option = strsep(&trace_boot_options, ",");
6863 		trace_set_options(&global_trace, option);
6864 	}
6865 
6866 	register_snapshot_cmd();
6867 
6868 	return 0;
6869 
6870 out_free_savedcmd:
6871 	free_saved_cmdlines_buffer(savedcmd);
6872 out_free_temp_buffer:
6873 	ring_buffer_free(temp_buffer);
6874 out_free_cpumask:
6875 	free_cpumask_var(global_trace.tracing_cpumask);
6876 out_free_buffer_mask:
6877 	free_cpumask_var(tracing_buffer_mask);
6878 out:
6879 	return ret;
6880 }
6881 
6882 void __init trace_init(void)
6883 {
6884 	if (tracepoint_printk) {
6885 		tracepoint_print_iter =
6886 			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
6887 		if (WARN_ON(!tracepoint_print_iter))
6888 			tracepoint_printk = 0;
6889 	}
6890 	tracer_alloc_buffers();
6891 	trace_event_init();
6892 }
6893 
6894 __init static int clear_boot_tracer(void)
6895 {
6896 	/*
6897 	 * The default bootup tracer name lives in an __init section.
6898 	 * This function is called from a late_initcall: if the boot
6899 	 * tracer was never registered, clear the pointer to prevent
6900 	 * a later registration from accessing the buffer that is
6901 	 * about to be freed.
6902 	 */
6903 	if (!default_bootup_tracer)
6904 		return 0;
6905 
6906 	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6907 	       default_bootup_tracer);
6908 	default_bootup_tracer = NULL;
6909 
6910 	return 0;
6911 }
6912 
6913 fs_initcall(tracer_init_debugfs);
6914 late_initcall(clear_boot_tracer);
6915