xref: /linux/kernel/trace/trace_output.c (revision 2cddfc2e8fc78c13b0f5286ea5dd48cdf527ad41)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_output.c
4  *
5  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
6  *
7  */
8 #include "trace.h"
9 #include <linux/module.h>
10 #include <linux/mutex.h>
11 #include <linux/ftrace.h>
12 #include <linux/kprobes.h>
13 #include <linux/sched/clock.h>
14 #include <linux/sched/mm.h>
15 #include <linux/idr.h>
16 #include <linux/btf.h>
17 #include <linux/bpf.h>
18 #include <linux/hashtable.h>
19 
20 #include "trace_output.h"
21 #include "trace_btf.h"
22 
23 /* 2^7 = 128 */
24 #define EVENT_HASH_BITS 7
25 
26 DECLARE_RWSEM(trace_event_sem);
27 
28 static DEFINE_HASHTABLE(event_hash, EVENT_HASH_BITS);
29 
30 enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
31 {
32 	struct trace_seq *s = &iter->seq;
33 	struct trace_entry *entry = iter->ent;
34 	struct bputs_entry *field;
35 
36 	trace_assign_type(field, entry);
37 
38 	trace_seq_puts(s, field->str);
39 
40 	return trace_handle_return(s);
41 }
42 
43 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
44 {
45 	struct trace_seq *s = &iter->seq;
46 	struct trace_entry *entry = iter->ent;
47 	struct bprint_entry *field;
48 
49 	trace_assign_type(field, entry);
50 
51 	trace_seq_bprintf(s, field->fmt, field->buf);
52 
53 	return trace_handle_return(s);
54 }
55 
56 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
57 {
58 	struct trace_seq *s = &iter->seq;
59 	struct trace_entry *entry = iter->ent;
60 	struct print_entry *field;
61 
62 	trace_assign_type(field, entry);
63 
64 	trace_seq_puts(s, field->buf);
65 
66 	return trace_handle_return(s);
67 }
68 
69 const char *
70 trace_print_flags_seq(struct trace_seq *p, const char *delim,
71 		      unsigned long flags,
72 		      const struct trace_print_flags *flag_array)
73 {
74 	unsigned long mask;
75 	const char *str;
76 	const char *ret = trace_seq_buffer_ptr(p);
77 	int i, first = 1;
78 
79 	for (i = 0;  flag_array[i].name && flags; i++) {
80 
81 		mask = flag_array[i].mask;
82 		if ((flags & mask) != mask)
83 			continue;
84 
85 		str = flag_array[i].name;
86 		flags &= ~mask;
87 		if (!first && delim)
88 			trace_seq_puts(p, delim);
89 		else
90 			first = 0;
91 		trace_seq_puts(p, str);
92 	}
93 
94 	/* check for left over flags */
95 	if (flags) {
96 		if (!first && delim)
97 			trace_seq_puts(p, delim);
98 		trace_seq_printf(p, "0x%lx", flags);
99 	}
100 
101 	trace_seq_putc(p, 0);
102 
103 	return ret;
104 }
105 EXPORT_SYMBOL(trace_print_flags_seq);
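
/*
 * Example (a minimal sketch, the field and flag names are invented):
 * a TP_printk() that uses __print_flags() ends up in
 * trace_print_flags_seq():
 *
 *	TP_printk("flags=%s",
 *		  __print_flags(__entry->req_flags, "|",
 *				{ 0x01, "SYNC" },
 *				{ 0x02, "META" }))
 *
 * With req_flags == 0x07 this prints "SYNC|META|0x4": known bits are
 * printed by name and any leftover bits are appended as a hex value.
 */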
106 
107 const char *
108 trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
109 			const struct trace_print_flags *symbol_array)
110 {
111 	int i;
112 	const char *ret = trace_seq_buffer_ptr(p);
113 
114 	for (i = 0;  symbol_array[i].name; i++) {
115 
116 		if (val != symbol_array[i].mask)
117 			continue;
118 
119 		trace_seq_puts(p, symbol_array[i].name);
120 		break;
121 	}
122 
123 	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
124 		trace_seq_printf(p, "0x%lx", val);
125 
126 	trace_seq_putc(p, 0);
127 
128 	return ret;
129 }
130 EXPORT_SYMBOL(trace_print_symbols_seq);
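
/*
 * Example (sketch only, names are hypothetical): __print_symbolic() in
 * a TP_printk() resolves to trace_print_symbols_seq():
 *
 *	TP_printk("state=%s",
 *		  __print_symbolic(__entry->state,
 *				   { 0, "IDLE" },
 *				   { 1, "RUNNING" }))
 *
 * A value with no matching entry is printed as hex, e.g. "0x5".
 */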
131 
132 #if BITS_PER_LONG == 32
133 const char *
134 trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
135 		      unsigned long long flags,
136 		      const struct trace_print_flags_u64 *flag_array)
137 {
138 	unsigned long long mask;
139 	const char *str;
140 	const char *ret = trace_seq_buffer_ptr(p);
141 	int i, first = 1;
142 
143 	for (i = 0;  flag_array[i].name && flags; i++) {
144 
145 		mask = flag_array[i].mask;
146 		if ((flags & mask) != mask)
147 			continue;
148 
149 		str = flag_array[i].name;
150 		flags &= ~mask;
151 		if (!first && delim)
152 			trace_seq_puts(p, delim);
153 		else
154 			first = 0;
155 		trace_seq_puts(p, str);
156 	}
157 
158 	/* check for left over flags */
159 	if (flags) {
160 		if (!first && delim)
161 			trace_seq_puts(p, delim);
162 		trace_seq_printf(p, "0x%llx", flags);
163 	}
164 
165 	trace_seq_putc(p, 0);
166 
167 	return ret;
168 }
169 EXPORT_SYMBOL(trace_print_flags_seq_u64);
170 
171 const char *
172 trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
173 			 const struct trace_print_flags_u64 *symbol_array)
174 {
175 	int i;
176 	const char *ret = trace_seq_buffer_ptr(p);
177 
178 	for (i = 0;  symbol_array[i].name; i++) {
179 
180 		if (val != symbol_array[i].mask)
181 			continue;
182 
183 		trace_seq_puts(p, symbol_array[i].name);
184 		break;
185 	}
186 
187 	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
188 		trace_seq_printf(p, "0x%llx", val);
189 
190 	trace_seq_putc(p, 0);
191 
192 	return ret;
193 }
194 EXPORT_SYMBOL(trace_print_symbols_seq_u64);
195 #endif
196 
197 /**
198  * trace_print_bitmask_seq - print a bitmask to a sequence buffer
199  * @iter: The trace iterator for the current event instance
200  * @bitmask_ptr: The pointer to the bitmask data
201  * @bitmask_size: The size of the bitmask in bytes
202  *
203  * Prints a bitmask into a sequence buffer as either a hex string or a
204  * human-readable range list, depending on the instance's "bitmask-list"
205  * trace option. The bitmask is formatted into the iterator's temporary
206  * scratchpad rather than the primary sequence buffer. This avoids
207  * duplication and pointer-collision issues when the returned string is
208  * processed by a "%s" specifier in a TP_printk() macro.
209  *
210  * Returns a pointer to the formatted string within the temporary buffer.
211  */
212 const char *
213 trace_print_bitmask_seq(struct trace_iterator *iter, void *bitmask_ptr,
214 			unsigned int bitmask_size)
215 {
216 	struct trace_seq *p = &iter->tmp_seq;
217 	const struct trace_array *tr = iter->tr;
218 	const char *ret;
219 
220 	trace_seq_init(p);
221 	ret = trace_seq_buffer_ptr(p);
222 
223 	if (tr->trace_flags & TRACE_ITER(BITMASK_LIST))
224 		trace_seq_bitmask_list(p, bitmask_ptr, bitmask_size * 8);
225 	else
226 		trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
227 
228 	trace_seq_putc(p, 0);
229 
230 	return ret;
231 }
232 EXPORT_SYMBOL_GPL(trace_print_bitmask_seq);
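
/*
 * Example (rough sketch, the "cpus" field is made up): an event that
 * declares a __bitmask() field reaches this helper through
 * __get_bitmask() in its TP_printk():
 *
 *	TP_STRUCT__entry(__bitmask(cpus, nr_cpumask_bits))
 *	TP_fast_assign(__assign_bitmask(cpus, cpumask_bits(mask),
 *					nr_cpumask_bits))
 *	TP_printk("cpus=%s", __get_bitmask(cpus))
 *
 * Depending on the bitmask-list option the field is rendered either as
 * a hex mask or as a human-readable range list such as "0-3".
 */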
233 
234 /**
235  * trace_print_hex_seq - print buffer as hex sequence
236  * @p: trace seq struct to write to
237  * @buf: The buffer to print
238  * @buf_len: Length of @buf in bytes
239  * @concatenate: Print @buf as single hex string or with spacing
240  *
241  * Prints the passed buffer as a hex sequence: as one contiguous
242  * hex string if @concatenate is true, or with the bytes separated
243  * by spaces if @concatenate is false.
244  */
245 const char *
246 trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len,
247 		    bool concatenate)
248 {
249 	int i;
250 	const char *ret = trace_seq_buffer_ptr(p);
251 	const char *fmt = concatenate ? "%*phN" : "%*ph";
252 
253 	for (i = 0; i < buf_len; i += 16) {
254 		if (!concatenate && i != 0)
255 			trace_seq_putc(p, ' ');
256 		trace_seq_printf(p, fmt, min(buf_len - i, 16), &buf[i]);
257 	}
258 	trace_seq_putc(p, 0);
259 
260 	return ret;
261 }
262 EXPORT_SYMBOL(trace_print_hex_seq);
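
/*
 * Example (illustrative only, the "data" field is hypothetical):
 * __print_hex() and __print_hex_str() in a TP_printk() map onto this
 * helper.  For a 3-byte buffer { 0xde, 0xad, 0xbe }:
 *
 *	__print_hex(__get_dynamic_array(data), 3)	-> "de ad be"
 *	__print_hex_str(__get_dynamic_array(data), 3)	-> "deadbe"
 */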
263 
264 const char *
265 trace_print_array_seq(struct trace_seq *p, const void *buf, int count,
266 		      size_t el_size)
267 {
268 	const char *ret = trace_seq_buffer_ptr(p);
269 	const char *prefix = "";
270 	void *ptr = (void *)buf;
271 	size_t buf_len = count * el_size;
272 
273 	trace_seq_putc(p, '{');
274 
275 	while (ptr < buf + buf_len) {
276 		switch (el_size) {
277 		case 1:
278 			trace_seq_printf(p, "%s0x%x", prefix,
279 					 *(u8 *)ptr);
280 			break;
281 		case 2:
282 			trace_seq_printf(p, "%s0x%x", prefix,
283 					 *(u16 *)ptr);
284 			break;
285 		case 4:
286 			trace_seq_printf(p, "%s0x%x", prefix,
287 					 *(u32 *)ptr);
288 			break;
289 		case 8:
290 			trace_seq_printf(p, "%s0x%llx", prefix,
291 					 *(u64 *)ptr);
292 			break;
293 		default:
294 			trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size,
295 					 *(u8 *)ptr);
296 			el_size = 1;
297 		}
298 		prefix = ",";
299 		ptr += el_size;
300 	}
301 
302 	trace_seq_putc(p, '}');
303 	trace_seq_putc(p, 0);
304 
305 	return ret;
306 }
307 EXPORT_SYMBOL(trace_print_array_seq);
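
/*
 * Example (sketch, the "vals" field is hypothetical): __print_array()
 * in a TP_printk() lands here.  For a u16 array { 1, 2, 3 }:
 *
 *	__print_array(__get_dynamic_array(vals), 3, sizeof(u16))
 *
 * prints "{0x1,0x2,0x3}".  An element size other than 1, 2, 4 or 8
 * falls back to byte-wise output prefixed with "BAD SIZE:".
 */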
308 
309 const char *
310 trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
311 			 int prefix_type, int rowsize, int groupsize,
312 			 const void *buf, size_t len, bool ascii)
313 {
314 	const char *ret = trace_seq_buffer_ptr(p);
315 
316 	trace_seq_putc(p, '\n');
317 	trace_seq_hex_dump(p, prefix_str, prefix_type,
318 			   rowsize, groupsize, buf, len, ascii);
319 	trace_seq_putc(p, 0);
320 	return ret;
321 }
322 EXPORT_SYMBOL(trace_print_hex_dump_seq);
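
/*
 * Example (sketch, the "buf" field is hypothetical): __print_hex_dump()
 * in a TP_printk() resolves to this helper and emits the usual
 * hex_dump_to_buffer() layout, one row per line:
 *
 *	__print_hex_dump("", DUMP_PREFIX_OFFSET, 16, 1,
 *			 __get_dynamic_array(buf), 32, false)
 */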
323 
324 int trace_raw_output_prep(struct trace_iterator *iter,
325 			  struct trace_event *trace_event)
326 {
327 	struct trace_event_call *event;
328 	struct trace_seq *s = &iter->seq;
329 	struct trace_seq *p = &iter->tmp_seq;
330 	struct trace_entry *entry;
331 
332 	event = container_of(trace_event, struct trace_event_call, event);
333 	entry = iter->ent;
334 
335 	if (entry->type != event->event.type) {
336 		WARN_ON_ONCE(1);
337 		return TRACE_TYPE_UNHANDLED;
338 	}
339 
340 	trace_seq_init(p);
341 	trace_seq_printf(s, "%s: ", trace_event_name(event));
342 
343 	return trace_handle_return(s);
344 }
345 EXPORT_SYMBOL(trace_raw_output_prep);
346 
347 void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...)
348 {
349 	struct trace_seq *s = &iter->seq;
350 	va_list ap;
351 
352 	if (ignore_event(iter))
353 		return;
354 
355 	va_start(ap, fmt);
356 	trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);
357 	va_end(ap);
358 }
359 EXPORT_SYMBOL(trace_event_printf);
360 
361 static __printf(3, 0)
362 int trace_output_raw(struct trace_iterator *iter, char *name,
363 		     char *fmt, va_list ap)
364 {
365 	struct trace_seq *s = &iter->seq;
366 
367 	trace_seq_printf(s, "%s: ", name);
368 	trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);
369 
370 	return trace_handle_return(s);
371 }
372 
373 int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
374 {
375 	va_list ap;
376 	int ret;
377 
378 	va_start(ap, fmt);
379 	ret = trace_output_raw(iter, name, fmt, ap);
380 	va_end(ap);
381 
382 	return ret;
383 }
384 EXPORT_SYMBOL_GPL(trace_output_call);
385 
386 static inline const char *kretprobed(const char *name, unsigned long addr)
387 {
388 	if (is_kretprobe_trampoline(addr))
389 		return "[unknown/kretprobe'd]";
390 	return name;
391 }
392 
393 void
394 trace_seq_print_sym(struct trace_seq *s, unsigned long address, bool offset)
395 {
396 #ifdef CONFIG_KALLSYMS
397 	char str[KSYM_SYMBOL_LEN];
398 	const char *name;
399 
400 	if (offset)
401 		sprint_symbol(str, address);
402 	else
403 		kallsyms_lookup(address, NULL, NULL, NULL, str);
404 	name = kretprobed(str, address);
405 
406 	if (name && strlen(name)) {
407 		trace_seq_puts(s, name);
408 		return;
409 	}
410 #endif
411 	trace_seq_printf(s, "0x%08lx", address);
412 }
413 
414 #ifndef CONFIG_64BIT
415 # define IP_FMT "%08lx"
416 #else
417 # define IP_FMT "%016lx"
418 #endif
419 
420 static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
421 			     unsigned long ip, unsigned long sym_flags)
422 {
423 	struct file *file = NULL;
424 	unsigned long vmstart = 0;
425 	int ret = 1;
426 
427 	if (s->full)
428 		return 0;
429 
430 	if (mm) {
431 		const struct vm_area_struct *vma;
432 
433 		mmap_read_lock(mm);
434 		vma = find_vma(mm, ip);
435 		if (vma) {
436 			file = vma->vm_file;
437 			vmstart = vma->vm_start;
438 		}
439 		if (file) {
440 			ret = trace_seq_path(s, file_user_path(file));
441 			if (ret)
442 				trace_seq_printf(s, "[+0x%lx]",
443 						 ip - vmstart);
444 		}
445 		mmap_read_unlock(mm);
446 	}
447 	if (ret && ((sym_flags & TRACE_ITER(SYM_ADDR)) || !file))
448 		trace_seq_printf(s, " <" IP_FMT ">", ip);
449 	return !trace_seq_has_overflowed(s);
450 }
451 
452 int
453 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
454 {
455 	if (!ip) {
456 		trace_seq_putc(s, '0');
457 		goto out;
458 	}
459 
460 	trace_seq_print_sym(s, ip, sym_flags & TRACE_ITER(SYM_OFFSET));
461 
462 	if (sym_flags & TRACE_ITER(SYM_ADDR))
463 		trace_seq_printf(s, " <" IP_FMT ">", ip);
464 
465  out:
466 	return !trace_seq_has_overflowed(s);
467 }
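
/*
 * Example (illustrative, the offsets are invented): with the sym-offset
 * option the symbol is printed as "schedule+0x10/0x90"; with sym-addr
 * the raw address is appended as well, e.g.
 * "schedule+0x10/0x90 <ffffffff81a34560>".  An address that cannot be
 * resolved falls back to plain "0x%08lx".
 */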
468 
469 /**
470  * trace_print_lat_fmt - print the irq, preempt and need-resched fields
471  * @s: trace seq struct to write to
472  * @entry: The trace entry field from the ring buffer
473  *
474  * Prints the generic latency fields: irqs off, need-resched state,
475  * hardirq/softirq context and the preempt count.
476  */
477 int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
478 {
479 	char hardsoft_irq;
480 	char need_resched;
481 	char irqs_off;
482 	int hardirq;
483 	int softirq;
484 	int bh_off;
485 	int nmi;
486 
487 	nmi = entry->flags & TRACE_FLAG_NMI;
488 	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
489 	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
490 	bh_off = entry->flags & TRACE_FLAG_BH_OFF;
491 
492 	irqs_off =
493 		(entry->flags & TRACE_FLAG_IRQS_OFF && bh_off) ? 'D' :
494 		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
495 		bh_off ? 'b' :
496 		'.';
497 
498 	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY |
499 				TRACE_FLAG_PREEMPT_RESCHED)) {
500 	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
501 		need_resched = 'B';
502 		break;
503 	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
504 		need_resched = 'N';
505 		break;
506 	case TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
507 		need_resched = 'L';
508 		break;
509 	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY:
510 		need_resched = 'b';
511 		break;
512 	case TRACE_FLAG_NEED_RESCHED:
513 		need_resched = 'n';
514 		break;
515 	case TRACE_FLAG_PREEMPT_RESCHED:
516 		need_resched = 'p';
517 		break;
518 	case TRACE_FLAG_NEED_RESCHED_LAZY:
519 		need_resched = 'l';
520 		break;
521 	default:
522 		need_resched = '.';
523 		break;
524 	}
525 
526 	hardsoft_irq =
527 		(nmi && hardirq)     ? 'Z' :
528 		nmi                  ? 'z' :
529 		(hardirq && softirq) ? 'H' :
530 		hardirq              ? 'h' :
531 		softirq              ? 's' :
532 		                       '.' ;
533 
534 	trace_seq_printf(s, "%c%c%c",
535 			 irqs_off, need_resched, hardsoft_irq);
536 
537 	if (entry->preempt_count & 0xf)
538 		trace_seq_printf(s, "%x", entry->preempt_count & 0xf);
539 	else
540 		trace_seq_putc(s, '.');
541 
542 	if (entry->preempt_count & 0xf0)
543 		trace_seq_printf(s, "%x", entry->preempt_count >> 4);
544 	else
545 		trace_seq_putc(s, '.');
546 
547 	return !trace_seq_has_overflowed(s);
548 }
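
/*
 * Worked example of the output above (not from a real trace): an event
 * recorded with IRQs disabled, the need-resched flag set, in hardirq
 * context and with a preempt_count of 1 prints "dnh1." (the trailing
 * '.' means the upper nibble of the saved preempt_count is zero).
 * A completely quiet context prints ".....".
 */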
549 
550 static int
551 lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
552 {
553 	char comm[TASK_COMM_LEN];
554 
555 	trace_find_cmdline(entry->pid, comm);
556 
557 	trace_seq_printf(s, "%8.8s-%-7d %3d",
558 			 comm, entry->pid, cpu);
559 
560 	return trace_print_lat_fmt(s, entry);
561 }
562 
563 #undef MARK
564 #define MARK(v, s) {.val = v, .sym = s}
565 /* trace overhead mark */
566 static const struct trace_mark {
567 	unsigned long long	val; /* unit: nsec */
568 	char			sym;
569 } mark[] = {
570 	MARK(1000000000ULL	, '$'), /* 1 sec */
571 	MARK(100000000ULL	, '@'), /* 100 msec */
572 	MARK(10000000ULL	, '*'), /* 10 msec */
573 	MARK(1000000ULL		, '#'), /* 1000 usecs */
574 	MARK(100000ULL		, '!'), /* 100 usecs */
575 	MARK(10000ULL		, '+'), /* 10 usecs */
576 };
577 #undef MARK
578 
579 char trace_find_mark(unsigned long long d)
580 {
581 	int i;
582 	int size = ARRAY_SIZE(mark);
583 
584 	for (i = 0; i < size; i++) {
585 		if (d > mark[i].val)
586 			break;
587 	}
588 
589 	return (i == size) ? ' ' : mark[i].sym;
590 }
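
/*
 * Example: a delta of 250000 ns (250 us) is above the 100 us mark but
 * below the 1 ms mark, so trace_find_mark() returns '!'.  Deltas of
 * 10 us or less fall through the whole table and return ' '.
 */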
591 
592 static int
593 lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
594 {
595 	struct trace_array *tr = iter->tr;
596 	unsigned long verbose = tr->trace_flags & TRACE_ITER(VERBOSE);
597 	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
598 	unsigned long long abs_ts = iter->ts - iter->array_buffer->time_start;
599 	unsigned long long rel_ts = next_ts - iter->ts;
600 	struct trace_seq *s = &iter->seq;
601 
602 	if (in_ns) {
603 		abs_ts = ns2usecs(abs_ts);
604 		rel_ts = ns2usecs(rel_ts);
605 	}
606 
607 	if (verbose && in_ns) {
608 		unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
609 		unsigned long abs_msec = (unsigned long)abs_ts;
610 		unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
611 		unsigned long rel_msec = (unsigned long)rel_ts;
612 
613 		trace_seq_printf(
614 			s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
615 			ns2usecs(iter->ts),
616 			abs_msec, abs_usec,
617 			rel_msec, rel_usec);
618 
619 	} else if (verbose && !in_ns) {
620 		trace_seq_printf(
621 			s, "[%016llx] %lld (+%lld): ",
622 			iter->ts, abs_ts, rel_ts);
623 
624 	} else if (!verbose && in_ns) {
625 		trace_seq_printf(
626 			s, " %4lldus%c: ",
627 			abs_ts,
628 			trace_find_mark(rel_ts * NSEC_PER_USEC));
629 
630 	} else { /* !verbose && !in_ns */
631 		trace_seq_printf(s, " %4lld: ", abs_ts);
632 	}
633 
634 	return !trace_seq_has_overflowed(s);
635 }
636 
637 static void trace_print_time(struct trace_seq *s, struct trace_iterator *iter,
638 			     unsigned long long ts)
639 {
640 	unsigned long secs, usec_rem;
641 	unsigned long long t;
642 
643 	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
644 		t = ns2usecs(ts);
645 		usec_rem = do_div(t, USEC_PER_SEC);
646 		secs = (unsigned long)t;
647 		trace_seq_printf(s, " %5lu.%06lu", secs, usec_rem);
648 	} else
649 		trace_seq_printf(s, " %12llu", ts);
650 }
651 
652 int trace_print_context(struct trace_iterator *iter)
653 {
654 	struct trace_array *tr = iter->tr;
655 	struct trace_seq *s = &iter->seq;
656 	struct trace_entry *entry = iter->ent;
657 	char comm[TASK_COMM_LEN];
658 
659 	trace_find_cmdline(entry->pid, comm);
660 
661 	trace_seq_printf(s, "%16s-%-7d ", comm, entry->pid);
662 
663 	if (tr->trace_flags & TRACE_ITER(RECORD_TGID)) {
664 		unsigned int tgid = trace_find_tgid(entry->pid);
665 
666 		if (!tgid)
667 			trace_seq_printf(s, "(-------) ");
668 		else
669 			trace_seq_printf(s, "(%7d) ", tgid);
670 	}
671 
672 	trace_seq_printf(s, "[%03d] ", iter->cpu);
673 
674 	if (tr->trace_flags & TRACE_ITER(IRQ_INFO))
675 		trace_print_lat_fmt(s, entry);
676 
677 	trace_print_time(s, iter, iter->ts);
678 	trace_seq_puts(s, ": ");
679 
680 	return !trace_seq_has_overflowed(s);
681 }
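
/*
 * The resulting line prefix has the following shape (spacing is
 * approximate and the values are invented):
 *
 *	<comm>-<pid> (<tgid>) [<cpu>] <irq-info> <timestamp>:
 *
 * e.g. "bash-1234 (1234) [002] d.h1. 123.456789: ".  The tgid column
 * only appears with the record-tgid option, and the irq-info column
 * can be switched off with the irq-info option.
 */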
682 
683 int trace_print_lat_context(struct trace_iterator *iter)
684 {
685 	struct trace_entry *entry, *next_entry;
686 	struct trace_array *tr = iter->tr;
687 	struct trace_seq *s = &iter->seq;
688 	unsigned long verbose = (tr->trace_flags & TRACE_ITER(VERBOSE));
689 	u64 next_ts;
690 
691 	next_entry = trace_find_next_entry(iter, NULL, &next_ts);
692 	if (!next_entry)
693 		next_ts = iter->ts;
694 
695 	/* trace_find_next_entry() may change iter->ent */
696 	entry = iter->ent;
697 
698 	if (verbose) {
699 		char comm[TASK_COMM_LEN];
700 
701 		trace_find_cmdline(entry->pid, comm);
702 
703 		trace_seq_printf(
704 			s, "%16s %7d %3d %d %08x %08lx ",
705 			comm, entry->pid, iter->cpu, entry->flags,
706 			entry->preempt_count & 0xf, iter->idx);
707 	} else {
708 		lat_print_generic(s, entry, iter->cpu);
709 	}
710 
711 	lat_print_timestamp(iter, next_ts);
712 
713 	return !trace_seq_has_overflowed(s);
714 }
715 
716 #ifdef CONFIG_FUNCTION_TRACE_ARGS
717 void print_function_args(struct trace_seq *s, unsigned long *args,
718 			 unsigned long func)
719 {
720 	const struct btf_param *param;
721 	const struct btf_type *t;
722 	const char *param_name;
723 	char name[KSYM_NAME_LEN];
724 	unsigned long arg;
725 	struct btf *btf;
726 	s32 tid, nr = 0;
727 	int a, p, x;
728 	u16 encode;
729 
730 	trace_seq_printf(s, "(");
731 
732 	if (!args)
733 		goto out;
734 	if (lookup_symbol_name(func, name))
735 		goto out;
736 
737 	/* TODO: Pass module name here too */
738 	t = btf_find_func_proto(name, &btf);
739 	if (IS_ERR_OR_NULL(t))
740 		goto out;
741 
742 	param = btf_get_func_param(t, &nr);
743 	if (!param)
744 		goto out_put;
745 
746 	for (a = 0, p = 0; p < nr; a++, p++) {
747 		if (p)
748 			trace_seq_puts(s, ", ");
749 
750 		/* This only prints what the arch allows (6 args by default) */
751 		if (a == FTRACE_REGS_MAX_ARGS) {
752 			trace_seq_puts(s, "...");
753 			break;
754 		}
755 
756 		arg = args[a];
757 
758 		param_name = btf_name_by_offset(btf, param[p].name_off);
759 		if (param_name)
760 			trace_seq_printf(s, "%s=", param_name);
761 		t = btf_type_skip_modifiers(btf, param[p].type, &tid);
762 
763 		switch (t ? BTF_INFO_KIND(t->info) : BTF_KIND_UNKN) {
764 		case BTF_KIND_UNKN:
765 			trace_seq_putc(s, '?');
766 			/* Still print unknown type values */
767 			fallthrough;
768 		case BTF_KIND_PTR:
769 			trace_seq_printf(s, "0x%lx", arg);
770 			break;
771 		case BTF_KIND_INT:
772 			encode = btf_int_encoding(t);
773 			/* Print unsigned ints as hex */
774 			if (encode & BTF_INT_SIGNED)
775 				trace_seq_printf(s, "%ld", arg);
776 			else
777 				trace_seq_printf(s, "0x%lx", arg);
778 			break;
779 		case BTF_KIND_ENUM:
780 			trace_seq_printf(s, "%ld", arg);
781 			break;
782 		default:
783 			/* This does not handle complex arguments */
784 			trace_seq_printf(s, "(%s)[0x%lx", btf_type_str(t), arg);
785 			for (x = sizeof(long); x < t->size; x += sizeof(long)) {
786 				trace_seq_putc(s, ':');
787 				if (++a == FTRACE_REGS_MAX_ARGS) {
788 					trace_seq_puts(s, "...]");
789 					goto out_put;
790 				}
791 				trace_seq_printf(s, "0x%lx", args[a]);
792 			}
793 			trace_seq_putc(s, ']');
794 			break;
795 		}
796 	}
797 out_put:
798 	btf_put(btf);
799 out:
800 	trace_seq_printf(s, ")");
801 }
802 #endif
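
/*
 * Example of the argument output above (a sketch, the function and the
 * values are invented): with function arguments enabled a call may be
 * rendered as:
 *
 *	my_func(ptr=0xffff888100340000, count=64)
 *
 * Pointers and unsigned integers are printed in hex, signed integers
 * and enums in decimal, and arguments beyond what the architecture
 * saves are shown as "...".
 */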
803 
804 /**
805  * ftrace_find_event - find a registered event
806  * @type: the type of event to look for
807  *
808  * Returns the event of type @type, or NULL if none is registered.
809  * Called with trace_event_read_lock() held.
810  */
811 struct trace_event *ftrace_find_event(int type)
812 {
813 	struct trace_event *event;
814 
815 	hash_for_each_possible(event_hash, event, node, type) {
816 		if (event->type == type)
817 			return event;
818 	}
819 
820 	return NULL;
821 }
822 
823 static DEFINE_IDA(trace_event_ida);
824 
825 static void free_trace_event_type(int type)
826 {
827 	if (type >= __TRACE_LAST_TYPE)
828 		ida_free(&trace_event_ida, type);
829 }
830 
831 static int alloc_trace_event_type(void)
832 {
833 	int next;
834 
835 	/* Skip statically defined type numbers */
836 	next = ida_alloc_range(&trace_event_ida, __TRACE_LAST_TYPE,
837 			       TRACE_EVENT_TYPE_MAX, GFP_KERNEL);
838 	if (next < 0)
839 		return 0;
840 	return next;
841 }
842 
843 void trace_event_read_lock(void)
844 {
845 	down_read(&trace_event_sem);
846 }
847 
848 void trace_event_read_unlock(void)
849 {
850 	up_read(&trace_event_sem);
851 }
852 
853 /**
854  * register_trace_event - register output for an event type
855  * @event: the event type to register
856  *
857  * Event types are stored in a hash table that is used to look up
858  * how to print an event. If @event->type is set then that type
859  * is used, otherwise a new type number is assigned to the
860  * event.
861  *
862  * If you assign your own type, please make sure it is added
863  * to the trace_type enum in trace.h, to avoid collisions
864  * with the dynamic types.
865  *
866  * Returns the event type number or zero on error.
867  */
868 int register_trace_event(struct trace_event *event)
869 {
870 	int ret = 0;
871 
872 	down_write(&trace_event_sem);
873 
874 	if (WARN_ON(!event))
875 		goto out;
876 
877 	if (WARN_ON(!event->funcs))
878 		goto out;
879 
880 	if (!event->type) {
881 		event->type = alloc_trace_event_type();
882 		if (!event->type)
883 			goto out;
884 	} else if (WARN(event->type > __TRACE_LAST_TYPE,
885 			"Need to add type to trace.h")) {
886 		goto out;
887 	} else {
888 		/* Is this event already used? */
889 		if (ftrace_find_event(event->type))
890 			goto out;
891 	}
892 
893 	if (event->funcs->trace == NULL)
894 		event->funcs->trace = trace_nop_print;
895 	if (event->funcs->raw == NULL)
896 		event->funcs->raw = trace_nop_print;
897 	if (event->funcs->hex == NULL)
898 		event->funcs->hex = trace_nop_print;
899 	if (event->funcs->binary == NULL)
900 		event->funcs->binary = trace_nop_print;
901 
902 	hash_add(event_hash, &event->node, event->type);
903 
904 	ret = event->type;
905  out:
906 	up_write(&trace_event_sem);
907 
908 	return ret;
909 }
910 EXPORT_SYMBOL_GPL(register_trace_event);
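
/*
 * Example (a sketch with made-up names): a tracer that emits its own
 * binary records registers an output handler the same way the static
 * events at the bottom of this file do:
 *
 *	static struct trace_event_functions my_funcs = {
 *		.trace	= my_trace_output,
 *	};
 *	static struct trace_event my_event = {
 *		.funcs	= &my_funcs,
 *	};
 *
 *	if (!register_trace_event(&my_event))
 *		pr_warn("could not register my_event\n");
 *
 * Leaving .type zero lets the core allocate a dynamic type number, and
 * any unset .raw/.hex/.binary callbacks default to trace_nop_print().
 */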
911 
912 /*
913  * Used by module code with the trace_event_sem held for write.
914  */
915 int __unregister_trace_event(struct trace_event *event)
916 {
917 	hash_del(&event->node);
918 	free_trace_event_type(event->type);
919 	return 0;
920 }
921 
922 /**
923  * unregister_trace_event - remove a no longer used event
924  * @event: the event to remove
925  */
926 int unregister_trace_event(struct trace_event *event)
927 {
928 	down_write(&trace_event_sem);
929 	__unregister_trace_event(event);
930 	up_write(&trace_event_sem);
931 
932 	return 0;
933 }
934 EXPORT_SYMBOL_GPL(unregister_trace_event);
935 
936 /*
937  * Standard events
938  */
939 
940 static void print_array(struct trace_iterator *iter, void *pos,
941 			struct ftrace_event_field *field)
942 {
943 	int offset;
944 	int len;
945 	int i;
946 
947 	offset = *(int *)pos & 0xffff;
948 	len = *(int *)pos >> 16;
949 
950 	if (field)
951 		offset += field->offset + sizeof(int);
952 
953 	if (offset + len > iter->ent_size) {
954 		trace_seq_puts(&iter->seq, "<OVERFLOW>");
955 		return;
956 	}
957 
958 	pos = (void *)iter->ent + offset;
959 
960 	for (i = 0; i < len; i++, pos++) {
961 		if (i)
962 			trace_seq_putc(&iter->seq, ',');
963 		trace_seq_printf(&iter->seq, "%02x", *(unsigned char *)pos);
964 	}
965 }
966 
967 static void print_fields(struct trace_iterator *iter, struct trace_event_call *call,
968 			 struct list_head *head)
969 {
970 	struct ftrace_event_field *field;
971 	struct trace_array *tr = iter->tr;
972 	unsigned long long laddr;
973 	unsigned long addr;
974 	int offset;
975 	int len;
976 	int ret;
977 	int i;
978 	void *pos;
979 	char *str;
980 
981 	list_for_each_entry_reverse(field, head, link) {
982 		trace_seq_printf(&iter->seq, " %s=", field->name);
983 		if (field->offset + field->size > iter->ent_size) {
984 			trace_seq_puts(&iter->seq, "<OVERFLOW>");
985 			continue;
986 		}
987 		pos = (void *)iter->ent + field->offset;
988 
989 		switch (field->filter_type) {
990 		case FILTER_COMM:
991 		case FILTER_STATIC_STRING:
992 			trace_seq_printf(&iter->seq, "%.*s", field->size, (char *)pos);
993 			break;
994 		case FILTER_RDYN_STRING:
995 		case FILTER_DYN_STRING:
996 			offset = *(int *)pos & 0xffff;
997 			len = *(int *)pos >> 16;
998 
999 			if (field->filter_type == FILTER_RDYN_STRING)
1000 				offset += field->offset + sizeof(int);
1001 
1002 			if (offset + len > iter->ent_size) {
1003 				trace_seq_puts(&iter->seq, "<OVERFLOW>");
1004 				break;
1005 			}
1006 			str = (char *)iter->ent + offset;
1007 			/* Check if there are any non-printable characters */
1008 			for (i = 0; i < len; i++) {
1009 				if (str[i] && !(isascii(str[i]) && isprint(str[i])))
1010 					break;
1011 			}
1012 			if (i < len) {
1013 				for (i = 0; i < len; i++) {
1014 					if (isascii(str[i]) && isprint(str[i]))
1015 						trace_seq_putc(&iter->seq, str[i]);
1016 					else
1017 						trace_seq_putc(&iter->seq, '.');
1018 				}
1019 				trace_seq_puts(&iter->seq, " (");
1020 				for (i = 0; i < len; i++) {
1021 					if (i)
1022 						trace_seq_putc(&iter->seq, ':');
1023 					trace_seq_printf(&iter->seq, "%02x", str[i]);
1024 				}
1025 				trace_seq_putc(&iter->seq, ')');
1026 			} else {
1027 				trace_seq_printf(&iter->seq, "%.*s", len, str);
1028 			}
1029 			break;
1030 		case FILTER_PTR_STRING:
1031 			if (!iter->fmt_size)
1032 				trace_iter_expand_format(iter);
1033 			addr = trace_adjust_address(tr, *(unsigned long *)pos);
1034 			ret = strncpy_from_kernel_nofault(iter->fmt, (void *)addr,
1035 							  iter->fmt_size);
1036 			if (ret < 0)
1037 				trace_seq_printf(&iter->seq, "(0x%px)", pos);
1038 			else
1039 				trace_seq_printf(&iter->seq, "(0x%px:%s)",
1040 						 pos, iter->fmt);
1041 			break;
1042 		case FILTER_TRACE_FN:
1043 			addr = trace_adjust_address(tr, *(unsigned long *)pos);
1044 			trace_seq_printf(&iter->seq, "%pS", (void *)addr);
1045 			break;
1046 		case FILTER_CPU:
1047 		case FILTER_OTHER:
1048 			switch (field->size) {
1049 			case 1:
1050 				if (isprint(*(char *)pos)) {
1051 					trace_seq_printf(&iter->seq, "'%c'",
1052 						 *(unsigned char *)pos);
1053 				}
1054 				trace_seq_printf(&iter->seq, "(%d)",
1055 						 *(unsigned char *)pos);
1056 				break;
1057 			case 2:
1058 				trace_seq_printf(&iter->seq, "0x%x (%d)",
1059 						 *(unsigned short *)pos,
1060 						 *(unsigned short *)pos);
1061 				break;
1062 			case 4:
1063 				/* dynamic array info is 4 bytes */
1064 				if (strstr(field->type, "__data_loc")) {
1065 					print_array(iter, pos, NULL);
1066 					break;
1067 				}
1068 
1069 				if (strstr(field->type, "__rel_loc")) {
1070 					print_array(iter, pos, field);
1071 					break;
1072 				}
1073 
1074 				addr = *(unsigned int *)pos;
1075 
1076 				/* Some fields reference offset from _stext. */
1077 				/* Some fields reference offsets from _stext. */
1078 				    !strcmp(field->name, "parent_offs")) {
1079 					unsigned long ip;
1080 
1081 					ip = addr + (unsigned long)_stext;
1082 					ip = trace_adjust_address(tr, ip);
1083 					trace_seq_printf(&iter->seq, "%pS ", (void *)ip);
1084 				}
1085 
1086 				if (sizeof(long) == 4) {
1087 					addr = trace_adjust_address(tr, addr);
1088 					trace_seq_printf(&iter->seq, "%pS (%d)",
1089 							 (void *)addr, (int)addr);
1090 				} else {
1091 					trace_seq_printf(&iter->seq, "0x%x (%d)",
1092 							 (unsigned int)addr, (int)addr);
1093 				}
1094 				break;
1095 			case 8:
1096 				laddr = *(unsigned long long *)pos;
1097 				if (sizeof(long) == 8) {
1098 					laddr = trace_adjust_address(tr, (unsigned long)laddr);
1099 					trace_seq_printf(&iter->seq, "%pS (%lld)",
1100 							 (void *)(long)laddr, laddr);
1101 				} else {
1102 					trace_seq_printf(&iter->seq, "0x%llx (%lld)", laddr, laddr);
1103 				}
1104 				break;
1105 			default:
1106 				trace_seq_puts(&iter->seq, "<INVALID-SIZE>");
1107 				break;
1108 			}
1109 			break;
1110 		default:
1111 			trace_seq_puts(&iter->seq, "<INVALID-TYPE>");
1112 		}
1113 	}
1114 	trace_seq_putc(&iter->seq, '\n');
1115 }
1116 
1117 enum print_line_t print_event_fields(struct trace_iterator *iter,
1118 				     struct trace_event *event)
1119 {
1120 	struct trace_event_call *call;
1121 	struct list_head *head;
1122 
1123 	lockdep_assert_held_read(&trace_event_sem);
1124 
1125 	/* ftrace defined events have separate call structures */
1126 	if (event->type <= __TRACE_LAST_TYPE) {
1127 		bool found = false;
1128 
1129 		list_for_each_entry(call, &ftrace_events, list) {
1130 			if (call->event.type == event->type) {
1131 				found = true;
1132 				break;
1133 			}
1134 			/* No need to search all events */
1135 			if (call->event.type > __TRACE_LAST_TYPE)
1136 				break;
1137 		}
1138 		if (!found) {
1139 			trace_seq_printf(&iter->seq, "UNKNOWN TYPE %d\n", event->type);
1140 			goto out;
1141 		}
1142 	} else {
1143 		call = container_of(event, struct trace_event_call, event);
1144 	}
1145 	head = trace_get_fields(call);
1146 
1147 	trace_seq_printf(&iter->seq, "%s:", trace_event_name(call));
1148 
1149 	if (head && !list_empty(head))
1150 		print_fields(iter, call, head);
1151 	else
1152 		trace_seq_puts(&iter->seq, "No fields found\n");
1153 
1154  out:
1155 	return trace_handle_return(&iter->seq);
1156 }
1157 
1158 enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
1159 				  struct trace_event *event)
1160 {
1161 	trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type);
1162 
1163 	return trace_handle_return(&iter->seq);
1164 }
1165 
1166 static void print_fn_trace(struct trace_seq *s, unsigned long ip,
1167 			   unsigned long parent_ip, unsigned long *args,
1168 			   struct trace_array *tr, int flags)
1169 {
1170 	ip = trace_adjust_address(tr, ip);
1171 	parent_ip = trace_adjust_address(tr, parent_ip);
1172 
1173 	seq_print_ip_sym(s, ip, flags);
1174 	if (args)
1175 		print_function_args(s, args, ip);
1176 
1177 	if ((flags & TRACE_ITER(PRINT_PARENT)) && parent_ip) {
1178 		trace_seq_puts(s, " <-");
1179 		seq_print_ip_sym(s, parent_ip, flags);
1180 	}
1181 }
1182 
1183 /* TRACE_FN */
1184 static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
1185 					struct trace_event *event)
1186 {
1187 	struct ftrace_entry *field;
1188 	struct trace_seq *s = &iter->seq;
1189 	unsigned long *args;
1190 	int args_size;
1191 
1192 	trace_assign_type(field, iter->ent);
1193 
1194 	args_size = iter->ent_size - offsetof(struct ftrace_entry, args);
1195 	if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long))
1196 		args = field->args;
1197 	else
1198 		args = NULL;
1199 
1200 	print_fn_trace(s, field->ip, field->parent_ip, args, iter->tr, flags);
1201 	trace_seq_putc(s, '\n');
1202 
1203 	return trace_handle_return(s);
1204 }
1205 
1206 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
1207 				      struct trace_event *event)
1208 {
1209 	struct ftrace_entry *field;
1210 
1211 	trace_assign_type(field, iter->ent);
1212 
1213 	trace_seq_printf(&iter->seq, "%lx %lx\n",
1214 			 field->ip,
1215 			 field->parent_ip);
1216 
1217 	return trace_handle_return(&iter->seq);
1218 }
1219 
1220 static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
1221 				      struct trace_event *event)
1222 {
1223 	struct ftrace_entry *field;
1224 	struct trace_seq *s = &iter->seq;
1225 
1226 	trace_assign_type(field, iter->ent);
1227 
1228 	SEQ_PUT_HEX_FIELD(s, field->ip);
1229 	SEQ_PUT_HEX_FIELD(s, field->parent_ip);
1230 
1231 	return trace_handle_return(s);
1232 }
1233 
1234 static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
1235 				      struct trace_event *event)
1236 {
1237 	struct ftrace_entry *field;
1238 	struct trace_seq *s = &iter->seq;
1239 
1240 	trace_assign_type(field, iter->ent);
1241 
1242 	SEQ_PUT_FIELD(s, field->ip);
1243 	SEQ_PUT_FIELD(s, field->parent_ip);
1244 
1245 	return trace_handle_return(s);
1246 }
1247 
1248 static struct trace_event_functions trace_fn_funcs = {
1249 	.trace		= trace_fn_trace,
1250 	.raw		= trace_fn_raw,
1251 	.hex		= trace_fn_hex,
1252 	.binary		= trace_fn_bin,
1253 };
1254 
1255 static struct trace_event trace_fn_event = {
1256 	.type		= TRACE_FN,
1257 	.funcs		= &trace_fn_funcs,
1258 };
1259 
1260 /* TRACE_CTX and TRACE_WAKE */
1261 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
1262 					     char *delim)
1263 {
1264 	struct ctx_switch_entry *field;
1265 	char comm[TASK_COMM_LEN];
1266 	int S, T;
1267 
1268 
1269 	trace_assign_type(field, iter->ent);
1270 
1271 	T = task_index_to_char(field->next_state);
1272 	S = task_index_to_char(field->prev_state);
1273 	trace_find_cmdline(field->next_pid, comm);
1274 	trace_seq_printf(&iter->seq,
1275 			 " %7d:%3d:%c %s [%03d] %7d:%3d:%c %s\n",
1276 			 field->prev_pid,
1277 			 field->prev_prio,
1278 			 S, delim,
1279 			 field->next_cpu,
1280 			 field->next_pid,
1281 			 field->next_prio,
1282 			 T, comm);
1283 
1284 	return trace_handle_return(&iter->seq);
1285 }
1286 
1287 static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
1288 					 struct trace_event *event)
1289 {
1290 	return trace_ctxwake_print(iter, "==>");
1291 }
1292 
1293 static enum print_line_t trace_wake_print(struct trace_iterator *iter,
1294 					  int flags, struct trace_event *event)
1295 {
1296 	return trace_ctxwake_print(iter, "  +");
1297 }
1298 
1299 static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
1300 {
1301 	struct ctx_switch_entry *field;
1302 	int T;
1303 
1304 	trace_assign_type(field, iter->ent);
1305 
1306 	if (!S)
1307 		S = task_index_to_char(field->prev_state);
1308 	T = task_index_to_char(field->next_state);
1309 	trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
1310 			 field->prev_pid,
1311 			 field->prev_prio,
1312 			 S,
1313 			 field->next_cpu,
1314 			 field->next_pid,
1315 			 field->next_prio,
1316 			 T);
1317 
1318 	return trace_handle_return(&iter->seq);
1319 }
1320 
1321 static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
1322 				       struct trace_event *event)
1323 {
1324 	return trace_ctxwake_raw(iter, 0);
1325 }
1326 
1327 static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
1328 					struct trace_event *event)
1329 {
1330 	return trace_ctxwake_raw(iter, '+');
1331 }
1332 
1333 
1334 static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
1335 {
1336 	struct ctx_switch_entry *field;
1337 	struct trace_seq *s = &iter->seq;
1338 	int T;
1339 
1340 	trace_assign_type(field, iter->ent);
1341 
1342 	if (!S)
1343 		S = task_index_to_char(field->prev_state);
1344 	T = task_index_to_char(field->next_state);
1345 
1346 	SEQ_PUT_HEX_FIELD(s, field->prev_pid);
1347 	SEQ_PUT_HEX_FIELD(s, field->prev_prio);
1348 	SEQ_PUT_HEX_FIELD(s, S);
1349 	SEQ_PUT_HEX_FIELD(s, field->next_cpu);
1350 	SEQ_PUT_HEX_FIELD(s, field->next_pid);
1351 	SEQ_PUT_HEX_FIELD(s, field->next_prio);
1352 	SEQ_PUT_HEX_FIELD(s, T);
1353 
1354 	return trace_handle_return(s);
1355 }
1356 
1357 static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
1358 				       struct trace_event *event)
1359 {
1360 	return trace_ctxwake_hex(iter, 0);
1361 }
1362 
1363 static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
1364 					struct trace_event *event)
1365 {
1366 	return trace_ctxwake_hex(iter, '+');
1367 }
1368 
1369 static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
1370 					   int flags, struct trace_event *event)
1371 {
1372 	struct ctx_switch_entry *field;
1373 	struct trace_seq *s = &iter->seq;
1374 
1375 	trace_assign_type(field, iter->ent);
1376 
1377 	SEQ_PUT_FIELD(s, field->prev_pid);
1378 	SEQ_PUT_FIELD(s, field->prev_prio);
1379 	SEQ_PUT_FIELD(s, field->prev_state);
1380 	SEQ_PUT_FIELD(s, field->next_cpu);
1381 	SEQ_PUT_FIELD(s, field->next_pid);
1382 	SEQ_PUT_FIELD(s, field->next_prio);
1383 	SEQ_PUT_FIELD(s, field->next_state);
1384 
1385 	return trace_handle_return(s);
1386 }
1387 
1388 static struct trace_event_functions trace_ctx_funcs = {
1389 	.trace		= trace_ctx_print,
1390 	.raw		= trace_ctx_raw,
1391 	.hex		= trace_ctx_hex,
1392 	.binary		= trace_ctxwake_bin,
1393 };
1394 
1395 static struct trace_event trace_ctx_event = {
1396 	.type		= TRACE_CTX,
1397 	.funcs		= &trace_ctx_funcs,
1398 };
1399 
1400 static struct trace_event_functions trace_wake_funcs = {
1401 	.trace		= trace_wake_print,
1402 	.raw		= trace_wake_raw,
1403 	.hex		= trace_wake_hex,
1404 	.binary		= trace_ctxwake_bin,
1405 };
1406 
1407 static struct trace_event trace_wake_event = {
1408 	.type		= TRACE_WAKE,
1409 	.funcs		= &trace_wake_funcs,
1410 };
1411 
1412 /* TRACE_STACK */
1413 
1414 static enum print_line_t trace_stack_print(struct trace_iterator *iter,
1415 					   int flags, struct trace_event *event)
1416 {
1417 	struct stack_entry *field;
1418 	struct trace_seq *s = &iter->seq;
1419 	unsigned long *p;
1420 	unsigned long *end;
1421 
1422 	trace_assign_type(field, iter->ent);
1423 	end = (unsigned long *)((long)iter->ent + iter->ent_size);
1424 
1425 	trace_seq_puts(s, "<stack trace>\n");
1426 
1427 	for (p = field->caller; p && p < end && *p != ULONG_MAX; p++) {
1428 
1429 		if (trace_seq_has_overflowed(s))
1430 			break;
1431 
1432 		trace_seq_puts(s, " => ");
1433 		if ((*p) == FTRACE_TRAMPOLINE_MARKER) {
1434 			trace_seq_puts(s, "[FTRACE TRAMPOLINE]\n");
1435 			continue;
1436 		}
1437 		seq_print_ip_sym(s, trace_adjust_address(iter->tr, *p), flags);
1438 		trace_seq_putc(s, '\n');
1439 	}
1440 
1441 	return trace_handle_return(s);
1442 }
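
/*
 * Example output (the symbols are only illustrative):
 *
 *	<stack trace>
 *	 => __schedule
 *	 => schedule
 *	 => worker_thread
 *	 => kthread
 */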
1443 
1444 static struct trace_event_functions trace_stack_funcs = {
1445 	.trace		= trace_stack_print,
1446 };
1447 
1448 static struct trace_event trace_stack_event = {
1449 	.type		= TRACE_STACK,
1450 	.funcs		= &trace_stack_funcs,
1451 };
1452 
1453 /* TRACE_USER_STACK */
1454 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
1455 						int flags, struct trace_event *event)
1456 {
1457 	struct trace_array *tr = iter->tr;
1458 	struct userstack_entry *field;
1459 	struct trace_seq *s = &iter->seq;
1460 	struct mm_struct *mm = NULL;
1461 	unsigned int i;
1462 
1463 	trace_assign_type(field, iter->ent);
1464 
1465 	trace_seq_puts(s, "<user stack trace>\n");
1466 
1467 	if (tr->trace_flags & TRACE_ITER(SYM_USEROBJ)) {
1468 		struct task_struct *task;
1469 		/*
1470 		 * we do the lookup on the thread group leader,
1471 		 * since individual threads might have already quit!
1472 		 */
1473 		rcu_read_lock();
1474 		task = find_task_by_vpid(field->tgid);
1475 		if (task)
1476 			mm = get_task_mm(task);
1477 		rcu_read_unlock();
1478 	}
1479 
1480 	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1481 		unsigned long ip = field->caller[i];
1482 
1483 		if (!ip || trace_seq_has_overflowed(s))
1484 			break;
1485 
1486 		trace_seq_puts(s, " => ");
1487 		seq_print_user_ip(s, mm, ip, flags);
1488 		trace_seq_putc(s, '\n');
1489 	}
1490 
1491 	if (mm)
1492 		mmput(mm);
1493 
1494 	return trace_handle_return(s);
1495 }
1496 
1497 static struct trace_event_functions trace_user_stack_funcs = {
1498 	.trace		= trace_user_stack_print,
1499 };
1500 
1501 static struct trace_event trace_user_stack_event = {
1502 	.type		= TRACE_USER_STACK,
1503 	.funcs		= &trace_user_stack_funcs,
1504 };
1505 
1506 /* TRACE_HWLAT */
1507 static enum print_line_t
1508 trace_hwlat_print(struct trace_iterator *iter, int flags,
1509 		  struct trace_event *event)
1510 {
1511 	struct trace_entry *entry = iter->ent;
1512 	struct trace_seq *s = &iter->seq;
1513 	struct hwlat_entry *field;
1514 
1515 	trace_assign_type(field, entry);
1516 
1517 	trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%ptSp count:%d",
1518 			 field->seqnum,
1519 			 field->duration,
1520 			 field->outer_duration,
1521 			 &field->timestamp,
1522 			 field->count);
1523 
1524 	if (field->nmi_count) {
1525 		/*
1526 		 * The generic sched_clock() is not NMI safe, thus
1527 		 * we only record the count and not the time.
1528 		 */
1529 		if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK))
1530 			trace_seq_printf(s, " nmi-total:%llu",
1531 					 field->nmi_total_ts);
1532 		trace_seq_printf(s, " nmi-count:%u",
1533 				 field->nmi_count);
1534 	}
1535 
1536 	trace_seq_putc(s, '\n');
1537 
1538 	return trace_handle_return(s);
1539 }
1540 
1541 static enum print_line_t
1542 trace_hwlat_raw(struct trace_iterator *iter, int flags,
1543 		struct trace_event *event)
1544 {
1545 	struct hwlat_entry *field;
1546 	struct trace_seq *s = &iter->seq;
1547 
1548 	trace_assign_type(field, iter->ent);
1549 
1550 	trace_seq_printf(s, "%llu %lld %lld %09ld %u\n",
1551 			 field->duration,
1552 			 field->outer_duration,
1553 			 (long long)field->timestamp.tv_sec,
1554 			 field->timestamp.tv_nsec,
1555 			 field->seqnum);
1556 
1557 	return trace_handle_return(s);
1558 }
1559 
1560 static struct trace_event_functions trace_hwlat_funcs = {
1561 	.trace		= trace_hwlat_print,
1562 	.raw		= trace_hwlat_raw,
1563 };
1564 
1565 static struct trace_event trace_hwlat_event = {
1566 	.type		= TRACE_HWLAT,
1567 	.funcs		= &trace_hwlat_funcs,
1568 };
1569 
1570 /* TRACE_OSNOISE */
1571 static enum print_line_t
1572 trace_osnoise_print(struct trace_iterator *iter, int flags,
1573 		    struct trace_event *event)
1574 {
1575 	struct trace_entry *entry = iter->ent;
1576 	struct trace_seq *s = &iter->seq;
1577 	struct osnoise_entry *field;
1578 	u64 ratio, ratio_dec;
1579 	u64 net_runtime;
1580 
1581 	trace_assign_type(field, entry);
1582 
1583 	/*
1584 	 * compute the available % of cpu time.
1585 	 */
1586 	net_runtime = field->runtime - field->noise;
1587 	ratio = net_runtime * 10000000;
1588 	do_div(ratio, field->runtime);
1589 	ratio_dec = do_div(ratio, 100000);
1590 
1591 	trace_seq_printf(s, "%llu %10llu %3llu.%05llu %7llu",
1592 			 field->runtime,
1593 			 field->noise,
1594 			 ratio, ratio_dec,
1595 			 field->max_sample);
1596 
1597 	trace_seq_printf(s, " %6u", field->hw_count);
1598 	trace_seq_printf(s, " %6u", field->nmi_count);
1599 	trace_seq_printf(s, " %6u", field->irq_count);
1600 	trace_seq_printf(s, " %6u", field->softirq_count);
1601 	trace_seq_printf(s, " %6u", field->thread_count);
1602 
1603 	trace_seq_putc(s, '\n');
1604 
1605 	return trace_handle_return(s);
1606 }
1607 
1608 static enum print_line_t
1609 trace_osnoise_raw(struct trace_iterator *iter, int flags,
1610 		  struct trace_event *event)
1611 {
1612 	struct osnoise_entry *field;
1613 	struct trace_seq *s = &iter->seq;
1614 
1615 	trace_assign_type(field, iter->ent);
1616 
1617 	trace_seq_printf(s, "%lld %llu %llu %u %u %u %u %u\n",
1618 			 field->runtime,
1619 			 field->noise,
1620 			 field->max_sample,
1621 			 field->hw_count,
1622 			 field->nmi_count,
1623 			 field->irq_count,
1624 			 field->softirq_count,
1625 			 field->thread_count);
1626 
1627 	return trace_handle_return(s);
1628 }
1629 
1630 static struct trace_event_functions trace_osnoise_funcs = {
1631 	.trace		= trace_osnoise_print,
1632 	.raw		= trace_osnoise_raw,
1633 };
1634 
1635 static struct trace_event trace_osnoise_event = {
1636 	.type		= TRACE_OSNOISE,
1637 	.funcs		= &trace_osnoise_funcs,
1638 };
1639 
1640 /* TRACE_TIMERLAT */
1641 
1642 static char *timerlat_lat_context[] = {"irq", "thread", "user-ret"};
1643 static enum print_line_t
1644 trace_timerlat_print(struct trace_iterator *iter, int flags,
1645 		     struct trace_event *event)
1646 {
1647 	struct trace_entry *entry = iter->ent;
1648 	struct trace_seq *s = &iter->seq;
1649 	struct timerlat_entry *field;
1650 
1651 	trace_assign_type(field, entry);
1652 
1653 	trace_seq_printf(s, "#%-5u context %6s timer_latency %9llu ns\n",
1654 			 field->seqnum,
1655 			 timerlat_lat_context[field->context],
1656 			 field->timer_latency);
1657 
1658 	return trace_handle_return(s);
1659 }
1660 
1661 static enum print_line_t
1662 trace_timerlat_raw(struct trace_iterator *iter, int flags,
1663 		   struct trace_event *event)
1664 {
1665 	struct timerlat_entry *field;
1666 	struct trace_seq *s = &iter->seq;
1667 
1668 	trace_assign_type(field, iter->ent);
1669 
1670 	trace_seq_printf(s, "%u %d %llu\n",
1671 			 field->seqnum,
1672 			 field->context,
1673 			 field->timer_latency);
1674 
1675 	return trace_handle_return(s);
1676 }
1677 
1678 static struct trace_event_functions trace_timerlat_funcs = {
1679 	.trace		= trace_timerlat_print,
1680 	.raw		= trace_timerlat_raw,
1681 };
1682 
1683 static struct trace_event trace_timerlat_event = {
1684 	.type		= TRACE_TIMERLAT,
1685 	.funcs		= &trace_timerlat_funcs,
1686 };
1687 
1688 /* TRACE_BPUTS */
1689 static enum print_line_t
1690 trace_bputs_print(struct trace_iterator *iter, int flags,
1691 		   struct trace_event *event)
1692 {
1693 	struct trace_entry *entry = iter->ent;
1694 	struct trace_seq *s = &iter->seq;
1695 	struct bputs_entry *field;
1696 
1697 	trace_assign_type(field, entry);
1698 
1699 	seq_print_ip_sym(s, field->ip, flags);
1700 	trace_seq_puts(s, ": ");
1701 	trace_seq_puts(s, field->str);
1702 
1703 	return trace_handle_return(s);
1704 }
1705 
1706 
1707 static enum print_line_t
1708 trace_bputs_raw(struct trace_iterator *iter, int flags,
1709 		struct trace_event *event)
1710 {
1711 	struct bputs_entry *field;
1712 	struct trace_seq *s = &iter->seq;
1713 
1714 	trace_assign_type(field, iter->ent);
1715 
1716 	trace_seq_printf(s, ": %lx : ", field->ip);
1717 	trace_seq_puts(s, field->str);
1718 
1719 	return trace_handle_return(s);
1720 }
1721 
1722 static struct trace_event_functions trace_bputs_funcs = {
1723 	.trace		= trace_bputs_print,
1724 	.raw		= trace_bputs_raw,
1725 };
1726 
1727 static struct trace_event trace_bputs_event = {
1728 	.type		= TRACE_BPUTS,
1729 	.funcs		= &trace_bputs_funcs,
1730 };
1731 
1732 /* TRACE_BPRINT */
1733 static enum print_line_t
1734 trace_bprint_print(struct trace_iterator *iter, int flags,
1735 		   struct trace_event *event)
1736 {
1737 	struct trace_entry *entry = iter->ent;
1738 	struct trace_seq *s = &iter->seq;
1739 	struct bprint_entry *field;
1740 
1741 	trace_assign_type(field, entry);
1742 
1743 	seq_print_ip_sym(s, field->ip, flags);
1744 	trace_seq_puts(s, ": ");
1745 	trace_seq_bprintf(s, field->fmt, field->buf);
1746 
1747 	return trace_handle_return(s);
1748 }
1749 
1750 
1751 static enum print_line_t
1752 trace_bprint_raw(struct trace_iterator *iter, int flags,
1753 		 struct trace_event *event)
1754 {
1755 	struct bprint_entry *field;
1756 	struct trace_seq *s = &iter->seq;
1757 
1758 	trace_assign_type(field, iter->ent);
1759 
1760 	trace_seq_printf(s, ": %lx : ", field->ip);
1761 	trace_seq_bprintf(s, field->fmt, field->buf);
1762 
1763 	return trace_handle_return(s);
1764 }
1765 
1766 static struct trace_event_functions trace_bprint_funcs = {
1767 	.trace		= trace_bprint_print,
1768 	.raw		= trace_bprint_raw,
1769 };
1770 
1771 static struct trace_event trace_bprint_event = {
1772 	.type		= TRACE_BPRINT,
1773 	.funcs		= &trace_bprint_funcs,
1774 };
1775 
1776 /* TRACE_PRINT */
1777 static enum print_line_t trace_print_print(struct trace_iterator *iter,
1778 					   int flags, struct trace_event *event)
1779 {
1780 	struct print_entry *field;
1781 	struct trace_seq *s = &iter->seq;
1782 	unsigned long ip;
1783 
1784 	trace_assign_type(field, iter->ent);
1785 
1786 	ip = trace_adjust_address(iter->tr, field->ip);
1787 
1788 	seq_print_ip_sym(s, ip, flags);
1789 	trace_seq_printf(s, ": %s", field->buf);
1790 
1791 	return trace_handle_return(s);
1792 }
1793 
1794 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
1795 					 struct trace_event *event)
1796 {
1797 	struct print_entry *field;
1798 
1799 	trace_assign_type(field, iter->ent);
1800 
1801 	trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
1802 
1803 	return trace_handle_return(&iter->seq);
1804 }
1805 
1806 static struct trace_event_functions trace_print_funcs = {
1807 	.trace		= trace_print_print,
1808 	.raw		= trace_print_raw,
1809 };
1810 
1811 static struct trace_event trace_print_event = {
1812 	.type	 	= TRACE_PRINT,
1813 	.funcs		= &trace_print_funcs,
1814 };
1815 
1816 static enum print_line_t trace_raw_data(struct trace_iterator *iter, int flags,
1817 					 struct trace_event *event)
1818 {
1819 	struct raw_data_entry *field;
1820 	int i;
1821 
1822 	trace_assign_type(field, iter->ent);
1823 
1824 	trace_seq_printf(&iter->seq, "# %x buf:", field->id);
1825 
1826 	for (i = 0; i < iter->ent_size - offsetof(struct raw_data_entry, buf); i++)
1827 		trace_seq_printf(&iter->seq, " %02x",
1828 				 (unsigned char)field->buf[i]);
1829 
1830 	trace_seq_putc(&iter->seq, '\n');
1831 
1832 	return trace_handle_return(&iter->seq);
1833 }
1834 
1835 static struct trace_event_functions trace_raw_data_funcs = {
1836 	.trace		= trace_raw_data,
1837 	.raw		= trace_raw_data,
1838 };
1839 
1840 static struct trace_event trace_raw_data_event = {
1841 	.type	 	= TRACE_RAW_DATA,
1842 	.funcs		= &trace_raw_data_funcs,
1843 };
1844 
1845 static enum print_line_t
1846 trace_func_repeats_raw(struct trace_iterator *iter, int flags,
1847 			 struct trace_event *event)
1848 {
1849 	struct func_repeats_entry *field;
1850 	struct trace_seq *s = &iter->seq;
1851 
1852 	trace_assign_type(field, iter->ent);
1853 
1854 	trace_seq_printf(s, "%lu %lu %u %llu\n",
1855 			 field->ip,
1856 			 field->parent_ip,
1857 			 field->count,
1858 			 FUNC_REPEATS_GET_DELTA_TS(field));
1859 
1860 	return trace_handle_return(s);
1861 }
1862 
1863 static enum print_line_t
1864 trace_func_repeats_print(struct trace_iterator *iter, int flags,
1865 			 struct trace_event *event)
1866 {
1867 	struct func_repeats_entry *field;
1868 	struct trace_seq *s = &iter->seq;
1869 
1870 	trace_assign_type(field, iter->ent);
1871 
1872 	print_fn_trace(s, field->ip, field->parent_ip, NULL, iter->tr, flags);
1873 	trace_seq_printf(s, " (repeats: %u, last_ts:", field->count);
1874 	trace_print_time(s, iter,
1875 			 iter->ts - FUNC_REPEATS_GET_DELTA_TS(field));
1876 	trace_seq_puts(s, ")\n");
1877 
1878 	return trace_handle_return(s);
1879 }
1880 
1881 static struct trace_event_functions trace_func_repeats_funcs = {
1882 	.trace		= trace_func_repeats_print,
1883 	.raw		= trace_func_repeats_raw,
1884 };
1885 
1886 static struct trace_event trace_func_repeats_event = {
1887 	.type	 	= TRACE_FUNC_REPEATS,
1888 	.funcs		= &trace_func_repeats_funcs,
1889 };
1890 
1891 static struct trace_event *events[] __initdata = {
1892 	&trace_fn_event,
1893 	&trace_ctx_event,
1894 	&trace_wake_event,
1895 	&trace_stack_event,
1896 	&trace_user_stack_event,
1897 	&trace_bputs_event,
1898 	&trace_bprint_event,
1899 	&trace_print_event,
1900 	&trace_hwlat_event,
1901 	&trace_osnoise_event,
1902 	&trace_timerlat_event,
1903 	&trace_raw_data_event,
1904 	&trace_func_repeats_event,
1905 	NULL
1906 };
1907 
1908 __init int init_events(void)
1909 {
1910 	struct trace_event *event;
1911 	int i, ret;
1912 
1913 	for (i = 0; events[i]; i++) {
1914 		event = events[i];
1915 		ret = register_trace_event(event);
1916 		WARN_ONCE(!ret, "event %d failed to register", event->type);
1917 	}
1918 
1919 	return 0;
1920 }
1921