xref: /linux/kernel/trace/trace_output.c (revision cb30bf881c5b4ee8b879558a2fce93d7de652955)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_output.c
4  *
5  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
6  *
7  */
8 #include "trace.h"
9 #include <linux/module.h>
10 #include <linux/mutex.h>
11 #include <linux/ftrace.h>
12 #include <linux/kprobes.h>
13 #include <linux/sched/clock.h>
14 #include <linux/sched/mm.h>
15 #include <linux/idr.h>
16 #include <linux/btf.h>
17 #include <linux/bpf.h>
18 #include <linux/hashtable.h>
19 
20 #include "trace_output.h"
21 #include "trace_btf.h"
22 
23 /* 2^7 = 128 */
24 #define EVENT_HASH_BITS 7
25 
26 DECLARE_RWSEM(trace_event_sem);
27 
28 static DEFINE_HASHTABLE(event_hash, EVENT_HASH_BITS);
29 
30 enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
31 {
32 	struct trace_seq *s = &iter->seq;
33 	struct trace_entry *entry = iter->ent;
34 	struct bputs_entry *field;
35 
36 	trace_assign_type(field, entry);
37 
38 	trace_seq_puts(s, field->str);
39 
40 	return trace_handle_return(s);
41 }
42 
43 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
44 {
45 	struct trace_seq *s = &iter->seq;
46 	struct trace_entry *entry = iter->ent;
47 	struct bprint_entry *field;
48 
49 	trace_assign_type(field, entry);
50 
51 	trace_seq_bprintf(s, field->fmt, field->buf);
52 
53 	return trace_handle_return(s);
54 }
55 
56 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
57 {
58 	struct trace_seq *s = &iter->seq;
59 	struct trace_entry *entry = iter->ent;
60 	struct print_entry *field;
61 
62 	trace_assign_type(field, entry);
63 
64 	trace_seq_puts(s, field->buf);
65 
66 	return trace_handle_return(s);
67 }
68 
/**
 * trace_print_flags_seq - print a bitmask as a list of flag names
 * @p: the trace_seq to write into
 * @delim: separator printed between names (may be NULL for none)
 * @flags: the flag bits to decode
 * @flag_array: table mapping mask values to names
 * @flag_array_size: number of entries in @flag_array
 *
 * Every table entry whose mask is fully contained in @flags is printed
 * by name and those bits are cleared from @flags; any bits left over at
 * the end are printed in hex.  The result is NUL terminated so the
 * returned pointer can be consumed by a "%s" in a TP_printk() format.
 *
 * Returns the start of the formatted string inside @p's buffer.
 */
const char *
trace_print_flags_seq(struct trace_seq *p, const char *delim,
		      unsigned long flags,
		      const struct trace_print_flags *flag_array,
		      size_t flag_array_size)
{
	unsigned long mask;
	const char *str;
	const char *ret = trace_seq_buffer_ptr(p);
	int i, first = 1;

	/* Stop early once every set bit has been accounted for */
	for (i = 0; i < flag_array_size && flags; i++) {

		mask = flag_array[i].mask;
		/* Only print a name when its mask is fully set in @flags */
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (!first && delim)
			trace_seq_puts(p, delim);
		else
			first = 0;
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (!first && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	/* NUL terminate so the buffer works as a C string */
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_flags_seq);
107 
108 const char *
109 trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
110 			const struct trace_print_flags *symbol_array,
111 			size_t symbol_array_size)
112 {
113 	int i;
114 	const char *ret = trace_seq_buffer_ptr(p);
115 
116 	for (i = 0; i < symbol_array_size; i++) {
117 
118 		if (val != symbol_array[i].mask)
119 			continue;
120 
121 		trace_seq_puts(p, symbol_array[i].name);
122 		break;
123 	}
124 
125 	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
126 		trace_seq_printf(p, "0x%lx", val);
127 
128 	trace_seq_putc(p, 0);
129 
130 	return ret;
131 }
132 EXPORT_SYMBOL(trace_print_symbols_seq);
133 
134 #if BITS_PER_LONG == 32
/*
 * 64-bit variant of trace_print_flags_seq() for 32-bit kernels, where
 * "unsigned long" cannot hold a u64 flag word.  Semantics are identical:
 * print each fully-matched mask by name, clear its bits, and dump any
 * remainder in hex.  Returns the start of the NUL-terminated string in
 * @p's buffer.
 */
const char *
trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
		      unsigned long long flags,
		      const struct trace_print_flags_u64 *flag_array,
		      size_t flag_array_size)
{
	unsigned long long mask;
	const char *str;
	const char *ret = trace_seq_buffer_ptr(p);
	int i, first = 1;

	/* Stop early once every set bit has been accounted for */
	for (i = 0; i < flag_array_size && flags; i++) {

		mask = flag_array[i].mask;
		/* Only print a name when its mask is fully set in @flags */
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (!first && delim)
			trace_seq_puts(p, delim);
		else
			first = 0;
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (!first && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%llx", flags);
	}

	/* NUL terminate so the buffer works as a C string */
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_flags_seq_u64);
173 
/*
 * 64-bit variant of trace_print_symbols_seq() for 32-bit kernels.
 * Prints the name matching @val, or the raw value in hex when no table
 * entry matches.  Returns the start of the NUL-terminated string in
 * @p's buffer.
 */
const char *
trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			    const struct trace_print_flags_u64 *symbol_array,
			    size_t symbol_array_size)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; i < symbol_array_size; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	/* Buffer pointer unchanged means no symbol matched */
	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
		trace_seq_printf(p, "0x%llx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_symbols_seq_u64);
199 #endif
200 
201 /**
202  * trace_print_bitmask_seq - print a bitmask to a sequence buffer
203  * @iter: The trace iterator for the current event instance
204  * @bitmask_ptr: The pointer to the bitmask data
205  * @bitmask_size: The size of the bitmask in bytes
206  *
207  * Prints a bitmask into a sequence buffer as either a hex string or a
208  * human-readable range list, depending on the instance's "bitmask-list"
209  * trace option. The bitmask is formatted into the iterator's temporary
210  * scratchpad rather than the primary sequence buffer. This avoids
211  * duplication and pointer-collision issues when the returned string is
212  * processed by a "%s" specifier in a TP_printk() macro.
213  *
214  * Returns a pointer to the formatted string within the temporary buffer.
215  */
216 const char *
217 trace_print_bitmask_seq(struct trace_iterator *iter, void *bitmask_ptr,
218 			unsigned int bitmask_size)
219 {
220 	struct trace_seq *p = &iter->tmp_seq;
221 	const struct trace_array *tr = iter->tr;
222 	const char *ret;
223 
224 	trace_seq_init(p);
225 	ret = trace_seq_buffer_ptr(p);
226 
227 	if (tr->trace_flags & TRACE_ITER(BITMASK_LIST))
228 		trace_seq_bitmask_list(p, bitmask_ptr, bitmask_size * 8);
229 	else
230 		trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
231 
232 	trace_seq_putc(p, 0);
233 
234 	return ret;
235 }
236 EXPORT_SYMBOL_GPL(trace_print_bitmask_seq);
237 
/**
 * trace_print_hex_seq - print buffer as hex sequence
 * @p: trace seq struct to write to
 * @buf: The buffer to print
 * @buf_len: Length of @buf in bytes
 * @concatenate: Print @buf as single hex string or with spacing
 *
 * Prints the passed buffer as a hex sequence either as a whole,
 * single hex string if @concatenate is true or with spacing after
 * each byte in case @concatenate is false.
 *
 * Return: the start of the NUL-terminated string inside @p's buffer.
 */
const char *
trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len,
		    bool concatenate)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);
	/* %*ph prints at most 64 bytes, so emit in chunks of 16 */
	const char *fmt = concatenate ? "%*phN" : "%*ph";

	for (i = 0; i < buf_len; i += 16) {
		if (!concatenate && i != 0)
			trace_seq_putc(p, ' ');
		trace_seq_printf(p, fmt, min(buf_len - i, 16), &buf[i]);
	}
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_hex_seq);
267 
/*
 * trace_print_array_seq - print an array of fixed-size elements
 * @p: the trace_seq to write into
 * @buf: the array data
 * @count: number of elements in the array
 * @el_size: size of each element in bytes (1, 2, 4 or 8)
 *
 * Prints the array as "{0x..,0x..,...}".  An unsupported @el_size is
 * flagged in the output and the rest of the buffer is dumped byte by
 * byte.  Returns the start of the NUL-terminated string in @p's buffer.
 */
const char *
trace_print_array_seq(struct trace_seq *p, const void *buf, int count,
		      size_t el_size)
{
	const char *ret = trace_seq_buffer_ptr(p);
	const char *prefix = "";
	void *ptr = (void *)buf;
	size_t buf_len = count * el_size;

	trace_seq_putc(p, '{');

	while (ptr < buf + buf_len) {
		switch (el_size) {
		case 1:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u8 *)ptr);
			break;
		case 2:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u16 *)ptr);
			break;
		case 4:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u32 *)ptr);
			break;
		case 8:
			trace_seq_printf(p, "%s0x%llx", prefix,
					 *(u64 *)ptr);
			break;
		default:
			trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size,
					 *(u8 *)ptr);
			/* Fall back to stepping one byte at a time */
			el_size = 1;
		}
		prefix = ",";
		ptr += el_size;
	}

	trace_seq_putc(p, '}');
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_array_seq);
312 
/*
 * trace_print_hex_dump_seq - print a multi-line hex dump of a buffer
 *
 * Thin wrapper around trace_seq_hex_dump() that starts the dump on a
 * fresh line and NUL-terminates the result.  Parameters mirror
 * print_hex_dump().  Returns the start of the string in @p's buffer.
 */
const char *
trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
			 int prefix_type, int rowsize, int groupsize,
			 const void *buf, size_t len, bool ascii)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_putc(p, '\n');
	trace_seq_hex_dump(p, prefix_str, prefix_type,
			   rowsize, groupsize, buf, len, ascii);
	trace_seq_putc(p, 0);
	return ret;
}
EXPORT_SYMBOL(trace_print_hex_dump_seq);
327 
/*
 * Common prologue for raw event output callbacks: verify that the ring
 * buffer entry's type matches the registered event, reset the scratch
 * seq and emit the "<event name>: " prefix into the output seq.
 */
int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *trace_event)
{
	struct trace_event_call *event;
	struct trace_seq *s = &iter->seq;
	struct trace_seq *p = &iter->tmp_seq;
	struct trace_entry *entry;

	event = container_of(trace_event, struct trace_event_call, event);
	entry = iter->ent;

	/* A mismatched type means the wrong callback was invoked */
	if (entry->type != event->event.type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_init(p);
	trace_seq_printf(s, "%s: ", trace_event_name(event));

	return trace_handle_return(s);
}
EXPORT_SYMBOL(trace_raw_output_prep);
350 
/*
 * printf-style output into the iterator's seq for TP_printk() style
 * events.  Skips events filtered out by ignore_event() and runs the
 * format through trace_event_format() before printing.
 */
void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...)
{
	struct trace_seq *s = &iter->seq;
	va_list ap;

	if (ignore_event(iter))
		return;

	va_start(ap, fmt);
	trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);
	va_end(ap);
}
EXPORT_SYMBOL(trace_event_printf);
364 
/*
 * Emit "<name>: " followed by the formatted event into the iterator's
 * seq.  Shared backend for trace_output_call().
 */
static __printf(3, 0)
int trace_output_raw(struct trace_iterator *iter, char *name,
		     char *fmt, va_list ap)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "%s: ", name);
	trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);

	return trace_handle_return(s);
}
376 
/*
 * Varargs front end for trace_output_raw(): print an event in
 * "<name>: <fmt...>" form into the iterator's seq.
 */
int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_output_raw(iter, name, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_output_call);
389 
/*
 * A symbol looked up inside the kretprobe trampoline is bogus (the
 * return address was hijacked); substitute a placeholder in that case.
 */
static inline const char *kretprobed(const char *name, unsigned long addr)
{
	return is_kretprobe_trampoline(addr) ? "[unknown/kretprobe'd]" : name;
}
396 
/*
 * Print @address symbolically into @s, with a "+0x<off>/0x<size>"
 * suffix when @offset is true.  Falls back to a raw hex address when
 * kallsyms is disabled or lookup produces no name.
 */
void
trace_seq_print_sym(struct trace_seq *s, unsigned long address, bool offset)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	if (offset)
		sprint_symbol(str, address);
	else
		kallsyms_lookup(address, NULL, NULL, NULL, str);
	/* Kretprobe trampoline addresses resolve to a bogus name */
	name = kretprobed(str, address);

	if (name && strlen(name)) {
		trace_seq_puts(s, name);
		return;
	}
#endif
	trace_seq_printf(s, "0x%08lx", address);
}
417 
418 #ifndef CONFIG_64BIT
419 # define IP_FMT "%08lx"
420 #else
421 # define IP_FMT "%016lx"
422 #endif
423 
/*
 * Print a user-space instruction pointer as "<file path>[+0x<offset>]"
 * by resolving @ip against the VMAs of @mm, falling back to (or
 * additionally printing) the raw address.
 *
 * @ret starts at 1 so that the raw address is printed whenever no
 * backing file was found or the path could not be emitted.
 */
static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
			     unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (s->full)
		return 0;

	if (mm) {
		const struct vm_area_struct *vma;

		/* The VMA tree may change under us without the lock */
		mmap_read_lock(mm);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, file_user_path(file));
			if (ret)
				trace_seq_printf(s, "[+0x%lx]",
						 ip - vmstart);
		}
		mmap_read_unlock(mm);
	}
	/* Raw address: always on sym-addr, or when no file mapped @ip */
	if (ret && ((sym_flags & TRACE_ITER(SYM_ADDR)) || !file))
		trace_seq_printf(s, " <" IP_FMT ">", ip);
	return !trace_seq_has_overflowed(s);
}
455 
456 int
457 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
458 {
459 	if (!ip) {
460 		trace_seq_putc(s, '0');
461 		goto out;
462 	}
463 
464 	trace_seq_print_sym(s, ip, sym_flags & TRACE_ITER(SYM_OFFSET));
465 
466 	if (sym_flags & TRACE_ITER(SYM_ADDR))
467 		trace_seq_printf(s, " <" IP_FMT ">", ip);
468 
469  out:
470 	return !trace_seq_has_overflowed(s);
471 }
472 
/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields of irqs off, in hard or softirq, preempt
 * count.  The output is the familiar 5-character latency column, e.g.
 * "dNh1.": irqs-off, need-resched, irq context, preempt count,
 * migrate-disable count.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	char hardsoft_irq;
	char need_resched;
	char irqs_off;
	int hardirq;
	int softirq;
	int bh_off;
	int nmi;

	nmi = entry->flags & TRACE_FLAG_NMI;
	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	bh_off = entry->flags & TRACE_FLAG_BH_OFF;

	/* 'D' = irqs and BH off, 'd' = irqs off, 'b' = BH off */
	irqs_off =
		(entry->flags & TRACE_FLAG_IRQS_OFF && bh_off) ? 'D' :
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
		bh_off ? 'b' :
		'.';

	/* One character for each combination of the three resched flags */
	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY |
				TRACE_FLAG_PREEMPT_RESCHED)) {
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'B';
		break;
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'N';
		break;
	case TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'L';
		break;
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY:
		need_resched = 'b';
		break;
	case TRACE_FLAG_NEED_RESCHED:
		need_resched = 'n';
		break;
	case TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'p';
		break;
	case TRACE_FLAG_NEED_RESCHED_LAZY:
		need_resched = 'l';
		break;
	default:
		need_resched = '.';
		break;
	}

	/* NMI takes precedence over hard/soft irq context */
	hardsoft_irq =
		(nmi && hardirq)     ? 'Z' :
		nmi                  ? 'z' :
		(hardirq && softirq) ? 'H' :
		hardirq              ? 'h' :
		softirq              ? 's' :
		                       '.' ;

	trace_seq_printf(s, "%c%c%c",
			 irqs_off, need_resched, hardsoft_irq);

	/* Low nibble: preempt count */
	if (entry->preempt_count & 0xf)
		trace_seq_printf(s, "%x", entry->preempt_count & 0xf);
	else
		trace_seq_putc(s, '.');

	/* High nibble: migrate-disable count */
	if (entry->preempt_count & 0xf0)
		trace_seq_printf(s, "%x", entry->preempt_count >> 4);
	else
		trace_seq_putc(s, '.');

	return !trace_seq_has_overflowed(s);
}
553 
/*
 * Print the "comm-pid cpu" columns followed by the latency flag
 * characters for one entry of the latency-format output.
 */
static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	trace_seq_printf(s, "%8.8s-%-7d %3d",
			 comm, entry->pid, cpu);

	return trace_print_lat_fmt(s, entry);
}
566 
#undef MARK
#define MARK(v, s) {.val = v, .sym = s}
/*
 * trace overhead mark
 * Must stay sorted in descending order of val: trace_find_mark()
 * returns the symbol of the first threshold the delta exceeds.
 */
static const struct trace_mark {
	unsigned long long	val; /* unit: nsec */
	char			sym;
} mark[] = {
	MARK(1000000000ULL	, '$'), /* 1 sec */
	MARK(100000000ULL	, '@'), /* 100 msec */
	MARK(10000000ULL	, '*'), /* 10 msec */
	MARK(1000000ULL		, '#'), /* 1000 usecs */
	MARK(100000ULL		, '!'), /* 100 usecs */
	MARK(10000ULL		, '+'), /* 10 usecs */
};
#undef MARK
581 #undef MARK
582 
583 char trace_find_mark(unsigned long long d)
584 {
585 	int i;
586 	int size = ARRAY_SIZE(mark);
587 
588 	for (i = 0; i < size; i++) {
589 		if (d > mark[i].val)
590 			break;
591 	}
592 
593 	return (i == size) ? ' ' : mark[i].sym;
594 }
595 
/*
 * Print the timestamp column of the latency format.  Output shape
 * depends on the verbose option and on whether the trace clock counts
 * in nanoseconds: verbose prints absolute plus relative time, terse
 * prints the absolute time and an overhead marker character.
 */
static int
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
	struct trace_array *tr = iter->tr;
	unsigned long verbose = tr->trace_flags & TRACE_ITER(VERBOSE);
	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
	/* Time since the buffer was (re)started */
	unsigned long long abs_ts = iter->ts - iter->array_buffer->time_start;
	/* Delta to the next entry in the buffer */
	unsigned long long rel_ts = next_ts - iter->ts;
	struct trace_seq *s = &iter->seq;

	if (in_ns) {
		abs_ts = ns2usecs(abs_ts);
		rel_ts = ns2usecs(rel_ts);
	}

	if (verbose && in_ns) {
		/* Split usecs into msec.usec for display */
		unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
		unsigned long abs_msec = (unsigned long)abs_ts;
		unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
		unsigned long rel_msec = (unsigned long)rel_ts;

		trace_seq_printf(
			s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
			ns2usecs(iter->ts),
			abs_msec, abs_usec,
			rel_msec, rel_usec);

	} else if (verbose && !in_ns) {
		/* Counter-based clock: print raw counts */
		trace_seq_printf(
			s, "[%016llx] %lld (+%lld): ",
			iter->ts, abs_ts, rel_ts);

	} else if (!verbose && in_ns) {
		trace_seq_printf(
			s, " %4lldus%c: ",
			abs_ts,
			trace_find_mark(rel_ts * NSEC_PER_USEC));

	} else { /* !verbose && !in_ns */
		trace_seq_printf(s, " %4lld: ", abs_ts);
	}

	return !trace_seq_has_overflowed(s);
}
640 
/*
 * Print a timestamp as "secs.usecs" when the trace clock counts in
 * nanoseconds, otherwise as the raw counter value.
 */
static void trace_print_time(struct trace_seq *s, struct trace_iterator *iter,
			     unsigned long long ts)
{
	unsigned long secs, usec_rem;
	unsigned long long t;

	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
		t = ns2usecs(ts);
		usec_rem = do_div(t, USEC_PER_SEC);
		secs = (unsigned long)t;
		trace_seq_printf(s, " %5lu.%06lu", secs, usec_rem);
	} else
		trace_seq_printf(s, " %12llu", ts);
}
655 
/*
 * Print the standard per-entry context columns:
 *   comm-pid [(tgid)] [cpu] [lat flags] timestamp:
 * TGID and latency flags depend on the record-tgid and irq-info trace
 * options.  Returns zero when the sequence overflowed.
 */
int trace_print_context(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	trace_seq_printf(s, "%16s-%-7d ", comm, entry->pid);

	if (tr->trace_flags & TRACE_ITER(RECORD_TGID)) {
		unsigned int tgid = trace_find_tgid(entry->pid);

		/* A zero tgid means the mapping was not recorded */
		if (!tgid)
			trace_seq_printf(s, "(-------) ");
		else
			trace_seq_printf(s, "(%7d) ", tgid);
	}

	trace_seq_printf(s, "[%03d] ", iter->cpu);

	if (tr->trace_flags & TRACE_ITER(IRQ_INFO))
		trace_print_lat_fmt(s, entry);

	trace_print_time(s, iter, iter->ts);
	trace_seq_puts(s, ": ");

	return !trace_seq_has_overflowed(s);
}
686 
/*
 * Print the context columns of the latency format, including the
 * timestamp relative to the next entry.  Returns zero when the
 * sequence overflowed.
 */
int trace_print_lat_context(struct trace_iterator *iter)
{
	struct trace_entry *entry, *next_entry;
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long verbose = (tr->trace_flags & TRACE_ITER(VERBOSE));
	u64 next_ts;

	/* Need the next entry's timestamp for the relative delta */
	next_entry = trace_find_next_entry(iter, NULL, &next_ts);
	if (!next_entry)
		next_ts = iter->ts;

	/* trace_find_next_entry() may change iter->ent */
	entry = iter->ent;

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		trace_seq_printf(
			s, "%16s %7d %3d %d %08x %08lx ",
			comm, entry->pid, iter->cpu, entry->flags,
			entry->preempt_count & 0xf, iter->idx);
	} else {
		lat_print_generic(s, entry, iter->cpu);
	}

	lat_print_timestamp(iter, next_ts);

	return !trace_seq_has_overflowed(s);
}
719 
720 #ifdef CONFIG_FUNCTION_TRACE_ARGS
/*
 * Print the saved arguments of a traced function as "(name=val, ...)"
 * by looking up the function prototype in BTF.  Falls back to a bare
 * "()" when no argument data or no BTF information is available.
 *
 * @args holds the raw register-passed argument words; @func is the
 * function's address, used to find its name and prototype.
 */
void print_function_args(struct trace_seq *s, unsigned long *args,
			 unsigned long func)
{
	const struct btf_param *param;
	const struct btf_type *t;
	const struct btf_enum *enums;
	const char *param_name;
	char name[KSYM_NAME_LEN];
	unsigned long arg;
	struct btf *btf;
	s32 tid, nr = 0;
	/* a indexes saved arg words, p indexes BTF params; they can diverge */
	int a, p, x, i;
	u16 encode;

	trace_seq_printf(s, "(");

	if (!args)
		goto out;
	if (lookup_symbol_name(func, name))
		goto out;

	/* TODO: Pass module name here too */
	t = btf_find_func_proto(name, &btf);
	if (IS_ERR_OR_NULL(t))
		goto out;

	param = btf_get_func_param(t, &nr);
	if (!param)
		goto out_put;

	for (a = 0, p = 0; p < nr; a++, p++) {
		if (p)
			trace_seq_puts(s, ", ");

		/* This only prints what the arch allows (6 args by default) */
		if (a == FTRACE_REGS_MAX_ARGS) {
			trace_seq_puts(s, "...");
			break;
		}

		arg = args[a];

		param_name = btf_name_by_offset(btf, param[p].name_off);
		if (param_name)
			trace_seq_printf(s, "%s=", param_name);
		/* Resolve typedefs/const/volatile down to the real kind */
		t = btf_type_skip_modifiers(btf, param[p].type, &tid);

		switch (t ? BTF_INFO_KIND(t->info) : BTF_KIND_UNKN) {
		case BTF_KIND_UNKN:
			trace_seq_putc(s, '?');
			/* Still print unknown type values */
			fallthrough;
		case BTF_KIND_PTR:
			trace_seq_printf(s, "0x%lx", arg);
			break;
		case BTF_KIND_INT:
			encode = btf_int_encoding(t);
			/* Print unsigned ints as hex */
			if (encode & BTF_INT_SIGNED)
				trace_seq_printf(s, "%ld", arg);
			else
				trace_seq_printf(s, "0x%lx", arg);
			break;
		case BTF_KIND_ENUM:
			trace_seq_printf(s, "%ld", arg);
			/* Append the enumerator name when it matches */
			enums = btf_enum(t);
			for (i = 0; i < btf_vlen(t); i++) {
				if (arg == enums[i].val) {
					trace_seq_printf(s, " [%s]",
							 btf_name_by_offset(btf,
							 enums[i].name_off));
					break;
				}
			}
			break;
		default:
			/* This does not handle complex arguments */
			trace_seq_printf(s, "(%s)[0x%lx", btf_type_str(t), arg);
			/* Multi-word arguments consume additional arg slots */
			for (x = sizeof(long); x < t->size; x += sizeof(long)) {
				trace_seq_putc(s, ':');
				if (++a == FTRACE_REGS_MAX_ARGS) {
					trace_seq_puts(s, "...]");
					goto out_put;
				}
				trace_seq_printf(s, "0x%lx", args[a]);
			}
			trace_seq_putc(s, ']');
			break;
		}
	}
out_put:
	btf_put(btf);
out:
	trace_seq_printf(s, ")");
}
816 #endif
817 
/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;

	/* event_hash is keyed directly on the event type */
	hash_for_each_possible(event_hash, event, node, type) {
		if (event->type == type)
			return event;
	}

	return NULL;
}
836 
837 static DEFINE_IDA(trace_event_ida);
838 
/*
 * Release a dynamically allocated event type back to the IDA.
 * Statically defined types (< __TRACE_LAST_TYPE) were never allocated
 * from it, so they are left alone.
 */
static void free_trace_event_type(int type)
{
	if (type >= __TRACE_LAST_TYPE)
		ida_free(&trace_event_ida, type);
}
844 
845 static int alloc_trace_event_type(void)
846 {
847 	int next;
848 
849 	/* Skip static defined type numbers */
850 	next = ida_alloc_range(&trace_event_ida, __TRACE_LAST_TYPE,
851 			       TRACE_EVENT_TYPE_MAX, GFP_KERNEL);
852 	if (next < 0)
853 		return 0;
854 	return next;
855 }
856 
/* Take the event hash lock for reading (lookups via ftrace_find_event()) */
void trace_event_read_lock(void)
{
	down_read(&trace_event_sem);
}
861 
/* Release the read side of the event hash lock */
void trace_event_read_unlock(void)
{
	up_read(&trace_event_sem);
}
866 
/**
 * register_trace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_trace_event(struct trace_event *event)
{
	int ret = 0;

	down_write(&trace_event_sem);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	if (!event->type) {
		/* No type requested: allocate one from the dynamic range */
		event->type = alloc_trace_event_type();
		if (!event->type)
			goto out;
	} else if (WARN(event->type > __TRACE_LAST_TYPE,
			"Need to add type to trace.h")) {
		goto out;
	} else {
		/* Is this event already used */
		if (ftrace_find_event(event->type))
			goto out;
	}

	/* Fill in any missing output callbacks with the nop printer */
	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	hash_add(event_hash, &event->node, event->type);

	ret = event->type;
 out:
	up_write(&trace_event_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(register_trace_event);
925 
/*
 * Used by module code with the trace_event_sem held for write.
 * Removes @event from the hash and returns its dynamic type number
 * (if any) to the IDA.
 */
int __unregister_trace_event(struct trace_event *event)
{
	hash_del(&event->node);
	free_trace_event_type(event->type);
	return 0;
}
935 
/**
 * unregister_trace_event - remove a no longer used event
 * @event: the event to remove
 *
 * Locked wrapper around __unregister_trace_event().  Always returns 0.
 */
int unregister_trace_event(struct trace_event *event)
{
	down_write(&trace_event_sem);
	__unregister_trace_event(event);
	up_write(&trace_event_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_trace_event);
949 
950 /*
951  * Standard events
952  */
953 
/*
 * Print a dynamic array field as colon-free hex bytes ("aa,bb,...").
 * @pos points at the packed descriptor word: low 16 bits are the data
 * offset, high 16 bits the length.  For __rel_loc fields @field is
 * non-NULL and the offset is relative to just past the descriptor.
 */
static void print_array(struct trace_iterator *iter, void *pos,
			struct ftrace_event_field *field)
{
	int offset;
	int len;
	int i;

	offset = *(int *)pos & 0xffff;
	len = *(int *)pos >> 16;

	if (field)
		offset += field->offset + sizeof(int);

	/* Don't walk past the end of the recorded entry */
	if (offset + len > iter->ent_size) {
		trace_seq_puts(&iter->seq, "<OVERFLOW>");
		return;
	}

	pos = (void *)iter->ent + offset;

	for (i = 0; i < len; i++, pos++) {
		if (i)
			trace_seq_putc(&iter->seq, ',');
		trace_seq_printf(&iter->seq, "%02x", *(unsigned char *)pos);
	}
}
980 
/*
 * Print every field of an event as " name=value", using the field
 * metadata instead of the event's TP_printk() format.  Used by the
 * "fields" trace option.  Each field is bounds-checked against the
 * recorded entry size before being dereferenced.
 */
static void print_fields(struct trace_iterator *iter, struct trace_event_call *call,
			 struct list_head *head)
{
	struct ftrace_event_field *field;
	struct trace_array *tr = iter->tr;
	unsigned long long laddr;
	unsigned long addr;
	int offset;
	int len;
	int ret;
	int i;
	void *pos;
	char *str;

	/* Fields are linked newest-first; reverse to declaration order */
	list_for_each_entry_reverse(field, head, link) {
		trace_seq_printf(&iter->seq, " %s=", field->name);
		if (field->offset + field->size > iter->ent_size) {
			trace_seq_puts(&iter->seq, "<OVERFLOW>");
			continue;
		}
		pos = (void *)iter->ent + field->offset;

		switch (field->filter_type) {
		case FILTER_COMM:
		case FILTER_STATIC_STRING:
			trace_seq_printf(&iter->seq, "%.*s", field->size, (char *)pos);
			break;
		case FILTER_RDYN_STRING:
		case FILTER_DYN_STRING:
			/* Packed descriptor: low 16 bits offset, high 16 length */
			offset = *(int *)pos & 0xffff;
			len = *(int *)pos >> 16;

			/* __rel_loc offsets are relative to after the descriptor */
			if (field->filter_type == FILTER_RDYN_STRING)
				offset += field->offset + sizeof(int);

			if (offset + len > iter->ent_size) {
				trace_seq_puts(&iter->seq, "<OVERFLOW>");
				break;
			}
			str = (char *)iter->ent + offset;
			/* Check if there's any non printable strings */
			for (i = 0; i < len; i++) {
				if (str[i] && !(isascii(str[i]) && isprint(str[i])))
					break;
			}
			if (i < len) {
				/* Binary data: show printable chars plus a hex dump */
				for (i = 0; i < len; i++) {
					if (isascii(str[i]) && isprint(str[i]))
						trace_seq_putc(&iter->seq, str[i]);
					else
						trace_seq_putc(&iter->seq, '.');
				}
				trace_seq_puts(&iter->seq, " (");
				for (i = 0; i < len; i++) {
					if (i)
						trace_seq_putc(&iter->seq, ':');
					trace_seq_printf(&iter->seq, "%02x", str[i]);
				}
				trace_seq_putc(&iter->seq, ')');
			} else {
				trace_seq_printf(&iter->seq, "%.*s", len, str);
			}
			break;
		case FILTER_PTR_STRING:
			if (!iter->fmt_size)
				trace_iter_expand_format(iter);
			addr = trace_adjust_address(tr, *(unsigned long *)pos);
			/* The pointed-to string may be gone; read it safely */
			ret = strncpy_from_kernel_nofault(iter->fmt, (void *)addr,
							  iter->fmt_size);
			if (ret < 0)
				trace_seq_printf(&iter->seq, "(0x%px)", pos);
			else
				trace_seq_printf(&iter->seq, "(0x%px:%s)",
						 pos, iter->fmt);
			break;
		case FILTER_TRACE_FN:
			addr = trace_adjust_address(tr, *(unsigned long *)pos);
			trace_seq_printf(&iter->seq, "%pS", (void *)addr);
			break;
		case FILTER_CPU:
		case FILTER_OTHER:
			switch (field->size) {
			case 1:
				if (isprint(*(char *)pos)) {
					trace_seq_printf(&iter->seq, "'%c'",
						 *(unsigned char *)pos);
				}
				trace_seq_printf(&iter->seq, "(%d)",
						 *(unsigned char *)pos);
				break;
			case 2:
				trace_seq_printf(&iter->seq, "0x%x (%d)",
						 *(unsigned short *)pos,
						 *(unsigned short *)pos);
				break;
			case 4:
				/* dynamic array info is 4 bytes */
				if (strstr(field->type, "__data_loc")) {
					print_array(iter, pos, NULL);
					break;
				}

				if (strstr(field->type, "__rel_loc")) {
					print_array(iter, pos, field);
					break;
				}

				addr = *(unsigned int *)pos;

				/* Some fields reference offset from _stext. */
				if (!strcmp(field->name, "caller_offs") ||
				    !strcmp(field->name, "parent_offs")) {
					unsigned long ip;

					ip = addr + (unsigned long)_stext;
					ip = trace_adjust_address(tr, ip);
					trace_seq_printf(&iter->seq, "%pS ", (void *)ip);
				}

				/* On 32-bit, a 4-byte value may itself be a pointer */
				if (sizeof(long) == 4) {
					addr = trace_adjust_address(tr, addr);
					trace_seq_printf(&iter->seq, "%pS (%d)",
							 (void *)addr, (int)addr);
				} else {
					trace_seq_printf(&iter->seq, "0x%x (%d)",
							 (unsigned int)addr, (int)addr);
				}
				break;
			case 8:
				laddr = *(unsigned long long *)pos;
				/* On 64-bit, an 8-byte value may itself be a pointer */
				if (sizeof(long) == 8) {
					laddr = trace_adjust_address(tr, (unsigned long)laddr);
					trace_seq_printf(&iter->seq, "%pS (%lld)",
							 (void *)(long)laddr, laddr);
				} else {
					trace_seq_printf(&iter->seq, "0x%llx (%lld)", laddr, laddr);
				}
				break;
			default:
				trace_seq_puts(&iter->seq, "<INVALID-SIZE>");
				break;
			}
			break;
		default:
			trace_seq_puts(&iter->seq, "<INVALID-TYPE>");
		}
	}
	trace_seq_putc(&iter->seq, '\n');
}
1130 
/*
 * Print an event as "name: field=value ..." using its field metadata.
 * Entry point for the "fields" trace option.  The event's call
 * structure is located either by scanning ftrace_events (for the
 * statically numbered ftrace-internal events) or via container_of()
 * for dynamically registered events.
 */
enum print_line_t print_event_fields(struct trace_iterator *iter,
				     struct trace_event *event)
{
	struct trace_event_call *call;
	struct list_head *head;

	lockdep_assert_held_read(&trace_event_sem);

	/* ftrace defined events have separate call structures */
	if (event->type <= __TRACE_LAST_TYPE) {
		bool found = false;

		list_for_each_entry(call, &ftrace_events, list) {
			if (call->event.type == event->type) {
				found = true;
				break;
			}
			/* No need to search all events */
			if (call->event.type > __TRACE_LAST_TYPE)
				break;
		}
		if (!found) {
			trace_seq_printf(&iter->seq, "UNKNOWN TYPE %d\n", event->type);
			goto out;
		}
	} else {
		call = container_of(event, struct trace_event_call, event);
	}
	head = trace_get_fields(call);

	trace_seq_printf(&iter->seq, "%s:", trace_event_name(call));

	if (head && !list_empty(head))
		print_fields(iter, call, head);
	else
		trace_seq_puts(&iter->seq, "No fields found\n");

 out:
	return trace_handle_return(&iter->seq);
}
1171 
1172 enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
1173 				  struct trace_event *event)
1174 {
1175 	trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type);
1176 
1177 	return trace_handle_return(&iter->seq);
1178 }
1179 
1180 static void print_fn_trace(struct trace_seq *s, unsigned long ip,
1181 			   unsigned long parent_ip, unsigned long *args,
1182 			   struct trace_array *tr, int flags)
1183 {
1184 	ip = trace_adjust_address(tr, ip);
1185 	parent_ip = trace_adjust_address(tr, parent_ip);
1186 
1187 	seq_print_ip_sym(s, ip, flags);
1188 	if (args)
1189 		print_function_args(s, args, ip);
1190 
1191 	if ((flags & TRACE_ITER(PRINT_PARENT)) && parent_ip) {
1192 		trace_seq_puts(s, " <-");
1193 		seq_print_ip_sym(s, parent_ip, flags);
1194 	}
1195 }
1196 
1197 /* TRACE_FN */
1198 static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
1199 					struct trace_event *event)
1200 {
1201 	struct ftrace_entry *field;
1202 	struct trace_seq *s = &iter->seq;
1203 	unsigned long *args;
1204 	int args_size;
1205 
1206 	trace_assign_type(field, iter->ent);
1207 
1208 	args_size = iter->ent_size - offsetof(struct ftrace_entry, args);
1209 	if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long))
1210 		args = field->args;
1211 	else
1212 		args = NULL;
1213 
1214 	print_fn_trace(s, field->ip, field->parent_ip, args, iter->tr, flags);
1215 	trace_seq_putc(s, '\n');
1216 
1217 	return trace_handle_return(s);
1218 }
1219 
1220 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
1221 				      struct trace_event *event)
1222 {
1223 	struct ftrace_entry *field;
1224 
1225 	trace_assign_type(field, iter->ent);
1226 
1227 	trace_seq_printf(&iter->seq, "%lx %lx\n",
1228 			 field->ip,
1229 			 field->parent_ip);
1230 
1231 	return trace_handle_return(&iter->seq);
1232 }
1233 
1234 static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
1235 				      struct trace_event *event)
1236 {
1237 	struct ftrace_entry *field;
1238 	struct trace_seq *s = &iter->seq;
1239 
1240 	trace_assign_type(field, iter->ent);
1241 
1242 	SEQ_PUT_HEX_FIELD(s, field->ip);
1243 	SEQ_PUT_HEX_FIELD(s, field->parent_ip);
1244 
1245 	return trace_handle_return(s);
1246 }
1247 
1248 static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
1249 				      struct trace_event *event)
1250 {
1251 	struct ftrace_entry *field;
1252 	struct trace_seq *s = &iter->seq;
1253 
1254 	trace_assign_type(field, iter->ent);
1255 
1256 	SEQ_PUT_FIELD(s, field->ip);
1257 	SEQ_PUT_FIELD(s, field->parent_ip);
1258 
1259 	return trace_handle_return(s);
1260 }
1261 
/* Output callbacks for TRACE_FN entries, one per output mode. */
static struct trace_event_functions trace_fn_funcs = {
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

/* Registration record binding the TRACE_FN type to its printers. */
static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.funcs		= &trace_fn_funcs,
};
1273 
/* TRACE_CTX and TRACE_WAKE */
1275 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
1276 					     char *delim)
1277 {
1278 	struct ctx_switch_entry *field;
1279 	char comm[TASK_COMM_LEN];
1280 	int S, T;
1281 
1282 
1283 	trace_assign_type(field, iter->ent);
1284 
1285 	T = task_index_to_char(field->next_state);
1286 	S = task_index_to_char(field->prev_state);
1287 	trace_find_cmdline(field->next_pid, comm);
1288 	trace_seq_printf(&iter->seq,
1289 			 " %7d:%3d:%c %s [%03d] %7d:%3d:%c %s\n",
1290 			 field->prev_pid,
1291 			 field->prev_prio,
1292 			 S, delim,
1293 			 field->next_cpu,
1294 			 field->next_pid,
1295 			 field->next_prio,
1296 			 T, comm);
1297 
1298 	return trace_handle_return(&iter->seq);
1299 }
1300 
/* "==>" marks an actual context switch between the two tasks. */
static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}

/* "  +" marks a wakeup: the second task was only made runnable. */
static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, "  +");
}
1312 
1313 static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
1314 {
1315 	struct ctx_switch_entry *field;
1316 	int T;
1317 
1318 	trace_assign_type(field, iter->ent);
1319 
1320 	if (!S)
1321 		S = task_index_to_char(field->prev_state);
1322 	T = task_index_to_char(field->next_state);
1323 	trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
1324 			 field->prev_pid,
1325 			 field->prev_prio,
1326 			 S,
1327 			 field->next_cpu,
1328 			 field->next_pid,
1329 			 field->next_prio,
1330 			 T);
1331 
1332 	return trace_handle_return(&iter->seq);
1333 }
1334 
/* ctx switch: let the helper derive prev-state from the entry. */
static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}

/* wakeup: prev-state is always shown as '+'. */
static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}
1346 
1347 
/*
 * Hex-mode printer shared by ctx and wake entries.  A zero @S means
 * "derive the prev-state character from the recorded entry".  The
 * field emission order below IS the output format - do not reorder.
 */
static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_index_to_char(field->prev_state);
	T = task_index_to_char(field->next_state);

	SEQ_PUT_HEX_FIELD(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD(s, S);
	SEQ_PUT_HEX_FIELD(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD(s, field->next_pid);
	SEQ_PUT_HEX_FIELD(s, field->next_prio);
	SEQ_PUT_HEX_FIELD(s, T);

	return trace_handle_return(s);
}
1370 
/* ctx switch: let the helper derive prev-state from the entry. */
static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}

/* wakeup: prev-state is always shown as '+'. */
static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}
1382 
/*
 * Binary-mode printer shared by ctx and wake entries.  The raw state
 * indices are emitted (no char conversion) and the field order below
 * defines the binary layout consumers parse - do not reorder.
 */
static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD(s, field->prev_pid);
	SEQ_PUT_FIELD(s, field->prev_prio);
	SEQ_PUT_FIELD(s, field->prev_state);
	SEQ_PUT_FIELD(s, field->next_cpu);
	SEQ_PUT_FIELD(s, field->next_pid);
	SEQ_PUT_FIELD(s, field->next_prio);
	SEQ_PUT_FIELD(s, field->next_state);

	return trace_handle_return(s);
}
1401 
/* Output callbacks for TRACE_CTX (binary shared with TRACE_WAKE). */
static struct trace_event_functions trace_ctx_funcs = {
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

/* Registration record for context-switch entries. */
static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.funcs		= &trace_ctx_funcs,
};

/* Output callbacks for TRACE_WAKE (binary shared with TRACE_CTX). */
static struct trace_event_functions trace_wake_funcs = {
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

/* Registration record for wakeup entries. */
static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.funcs		= &trace_wake_funcs,
};
1425 
1426 /* TRACE_STACK */
1427 
1428 static enum print_line_t trace_stack_print(struct trace_iterator *iter,
1429 					   int flags, struct trace_event *event)
1430 {
1431 	struct stack_entry *field;
1432 	struct trace_seq *s = &iter->seq;
1433 	unsigned long *p;
1434 	unsigned long *end;
1435 
1436 	trace_assign_type(field, iter->ent);
1437 	end = (unsigned long *)((long)iter->ent + iter->ent_size);
1438 
1439 	trace_seq_puts(s, "<stack trace>\n");
1440 
1441 	for (p = field->caller; p && p < end && *p != ULONG_MAX; p++) {
1442 
1443 		if (trace_seq_has_overflowed(s))
1444 			break;
1445 
1446 		trace_seq_puts(s, " => ");
1447 		if ((*p) == FTRACE_TRAMPOLINE_MARKER) {
1448 			trace_seq_puts(s, "[FTRACE TRAMPOLINE]\n");
1449 			continue;
1450 		}
1451 		seq_print_ip_sym(s, trace_adjust_address(iter->tr, *p), flags);
1452 		trace_seq_putc(s, '\n');
1453 	}
1454 
1455 	return trace_handle_return(s);
1456 }
1457 
/* Kernel stack traces only have a default text printer. */
static struct trace_event_functions trace_stack_funcs = {
	.trace		= trace_stack_print,
};

/* Registration record for kernel stack trace entries. */
static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.funcs		= &trace_stack_funcs,
};
1466 
/* TRACE_USER_STACK */
/*
 * Print a recorded user-space stack trace.  If SYM_USEROBJ is set,
 * the task's mm is looked up (and pinned) so addresses can be mapped
 * to the objects they belong to.
 */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags, struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;
	struct mm_struct *mm = NULL;
	unsigned int i;

	trace_assign_type(field, iter->ent);

	trace_seq_puts(s, "<user stack trace>\n");

	if (tr->trace_flags & TRACE_ITER(SYM_USEROBJ)) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(field->tgid);
		if (task)
			mm = get_task_mm(task);	/* takes a reference; dropped below */
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = field->caller[i];

		/* a zero entry terminates the recorded stack */
		if (!ip || trace_seq_has_overflowed(s))
			break;

		trace_seq_puts(s, " => ");
		seq_print_user_ip(s, mm, ip, flags);
		trace_seq_putc(s, '\n');
	}

	if (mm)
		mmput(mm);

	return trace_handle_return(s);
}
1510 
/* User stack traces only have a default text printer. */
static struct trace_event_functions trace_user_stack_funcs = {
	.trace		= trace_user_stack_print,
};

/* Registration record for user stack trace entries. */
static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.funcs		= &trace_user_stack_funcs,
};
1519 
1520 /* TRACE_HWLAT */
1521 static enum print_line_t
1522 trace_hwlat_print(struct trace_iterator *iter, int flags,
1523 		  struct trace_event *event)
1524 {
1525 	struct trace_entry *entry = iter->ent;
1526 	struct trace_seq *s = &iter->seq;
1527 	struct hwlat_entry *field;
1528 
1529 	trace_assign_type(field, entry);
1530 
1531 	trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%ptSp count:%d",
1532 			 field->seqnum,
1533 			 field->duration,
1534 			 field->outer_duration,
1535 			 &field->timestamp,
1536 			 field->count);
1537 
1538 	if (field->nmi_count) {
1539 		/*
1540 		 * The generic sched_clock() is not NMI safe, thus
1541 		 * we only record the count and not the time.
1542 		 */
1543 		if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK))
1544 			trace_seq_printf(s, " nmi-total:%llu",
1545 					 field->nmi_total_ts);
1546 		trace_seq_printf(s, " nmi-count:%u",
1547 				 field->nmi_count);
1548 	}
1549 
1550 	trace_seq_putc(s, '\n');
1551 
1552 	return trace_handle_return(s);
1553 }
1554 
1555 static enum print_line_t
1556 trace_hwlat_raw(struct trace_iterator *iter, int flags,
1557 		struct trace_event *event)
1558 {
1559 	struct hwlat_entry *field;
1560 	struct trace_seq *s = &iter->seq;
1561 
1562 	trace_assign_type(field, iter->ent);
1563 
1564 	trace_seq_printf(s, "%llu %lld %lld %09ld %u\n",
1565 			 field->duration,
1566 			 field->outer_duration,
1567 			 (long long)field->timestamp.tv_sec,
1568 			 field->timestamp.tv_nsec,
1569 			 field->seqnum);
1570 
1571 	return trace_handle_return(s);
1572 }
1573 
/* Output callbacks for hardware-latency samples. */
static struct trace_event_functions trace_hwlat_funcs = {
	.trace		= trace_hwlat_print,
	.raw		= trace_hwlat_raw,
};

/* Registration record for TRACE_HWLAT entries. */
static struct trace_event trace_hwlat_event = {
	.type		= TRACE_HWLAT,
	.funcs		= &trace_hwlat_funcs,
};
1583 
/* TRACE_OSNOISE */
/* Print an osnoise sample: runtime, noise, available %, and counters. */
static enum print_line_t
trace_osnoise_print(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct osnoise_entry *field;
	u64 ratio, ratio_dec;
	u64 net_runtime;

	trace_assign_type(field, entry);

	/*
	 * compute the available % of cpu time.
	 */
	net_runtime = field->runtime - field->noise;
	/*
	 * Scale by 10^7 so that after the two divisions below, the
	 * quotient is the integer percentage and the remainder its
	 * five decimal digits.  do_div() divides in place and returns
	 * the remainder, so the order of these calls matters.
	 */
	ratio = net_runtime * 10000000;
	do_div(ratio, field->runtime);
	ratio_dec = do_div(ratio, 100000);

	trace_seq_printf(s, "%llu %10llu %3llu.%05llu %7llu",
			 field->runtime,
			 field->noise,
			 ratio, ratio_dec,
			 field->max_sample);

	/* per-source interference counters */
	trace_seq_printf(s, " %6u", field->hw_count);
	trace_seq_printf(s, " %6u", field->nmi_count);
	trace_seq_printf(s, " %6u", field->irq_count);
	trace_seq_printf(s, " %6u", field->softirq_count);
	trace_seq_printf(s, " %6u", field->thread_count);

	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}
1621 
1622 static enum print_line_t
1623 trace_osnoise_raw(struct trace_iterator *iter, int flags,
1624 		  struct trace_event *event)
1625 {
1626 	struct osnoise_entry *field;
1627 	struct trace_seq *s = &iter->seq;
1628 
1629 	trace_assign_type(field, iter->ent);
1630 
1631 	trace_seq_printf(s, "%lld %llu %llu %u %u %u %u %u\n",
1632 			 field->runtime,
1633 			 field->noise,
1634 			 field->max_sample,
1635 			 field->hw_count,
1636 			 field->nmi_count,
1637 			 field->irq_count,
1638 			 field->softirq_count,
1639 			 field->thread_count);
1640 
1641 	return trace_handle_return(s);
1642 }
1643 
/* Output callbacks for osnoise samples. */
static struct trace_event_functions trace_osnoise_funcs = {
	.trace		= trace_osnoise_print,
	.raw		= trace_osnoise_raw,
};

/* Registration record for TRACE_OSNOISE entries. */
static struct trace_event trace_osnoise_event = {
	.type		= TRACE_OSNOISE,
	.funcs		= &trace_osnoise_funcs,
};
1653 
1654 /* TRACE_TIMERLAT */
1655 
1656 static char *timerlat_lat_context[] = {"irq", "thread", "user-ret"};
1657 static enum print_line_t
1658 trace_timerlat_print(struct trace_iterator *iter, int flags,
1659 		     struct trace_event *event)
1660 {
1661 	struct trace_entry *entry = iter->ent;
1662 	struct trace_seq *s = &iter->seq;
1663 	struct timerlat_entry *field;
1664 
1665 	trace_assign_type(field, entry);
1666 
1667 	trace_seq_printf(s, "#%-5u context %6s timer_latency %9llu ns\n",
1668 			 field->seqnum,
1669 			 timerlat_lat_context[field->context],
1670 			 field->timer_latency);
1671 
1672 	return trace_handle_return(s);
1673 }
1674 
1675 static enum print_line_t
1676 trace_timerlat_raw(struct trace_iterator *iter, int flags,
1677 		   struct trace_event *event)
1678 {
1679 	struct timerlat_entry *field;
1680 	struct trace_seq *s = &iter->seq;
1681 
1682 	trace_assign_type(field, iter->ent);
1683 
1684 	trace_seq_printf(s, "%u %d %llu\n",
1685 			 field->seqnum,
1686 			 field->context,
1687 			 field->timer_latency);
1688 
1689 	return trace_handle_return(s);
1690 }
1691 
/* Output callbacks for timerlat samples. */
static struct trace_event_functions trace_timerlat_funcs = {
	.trace		= trace_timerlat_print,
	.raw		= trace_timerlat_raw,
};

/* Registration record for TRACE_TIMERLAT entries. */
static struct trace_event trace_timerlat_event = {
	.type		= TRACE_TIMERLAT,
	.funcs		= &trace_timerlat_funcs,
};
1701 
1702 /* TRACE_BPUTS */
1703 static enum print_line_t
1704 trace_bputs_print(struct trace_iterator *iter, int flags,
1705 		   struct trace_event *event)
1706 {
1707 	struct trace_entry *entry = iter->ent;
1708 	struct trace_seq *s = &iter->seq;
1709 	struct bputs_entry *field;
1710 
1711 	trace_assign_type(field, entry);
1712 
1713 	seq_print_ip_sym(s, field->ip, flags);
1714 	trace_seq_puts(s, ": ");
1715 	trace_seq_puts(s, field->str);
1716 
1717 	return trace_handle_return(s);
1718 }
1719 
1720 
1721 static enum print_line_t
1722 trace_bputs_raw(struct trace_iterator *iter, int flags,
1723 		struct trace_event *event)
1724 {
1725 	struct bputs_entry *field;
1726 	struct trace_seq *s = &iter->seq;
1727 
1728 	trace_assign_type(field, iter->ent);
1729 
1730 	trace_seq_printf(s, ": %lx : ", field->ip);
1731 	trace_seq_puts(s, field->str);
1732 
1733 	return trace_handle_return(s);
1734 }
1735 
/* Output callbacks for trace_bputs() records. */
static struct trace_event_functions trace_bputs_funcs = {
	.trace		= trace_bputs_print,
	.raw		= trace_bputs_raw,
};

/* Registration record for TRACE_BPUTS entries. */
static struct trace_event trace_bputs_event = {
	.type		= TRACE_BPUTS,
	.funcs		= &trace_bputs_funcs,
};
1745 
1746 /* TRACE_BPRINT */
1747 static enum print_line_t
1748 trace_bprint_print(struct trace_iterator *iter, int flags,
1749 		   struct trace_event *event)
1750 {
1751 	struct trace_entry *entry = iter->ent;
1752 	struct trace_seq *s = &iter->seq;
1753 	struct bprint_entry *field;
1754 
1755 	trace_assign_type(field, entry);
1756 
1757 	seq_print_ip_sym(s, field->ip, flags);
1758 	trace_seq_puts(s, ": ");
1759 	trace_seq_bprintf(s, field->fmt, field->buf);
1760 
1761 	return trace_handle_return(s);
1762 }
1763 
1764 
1765 static enum print_line_t
1766 trace_bprint_raw(struct trace_iterator *iter, int flags,
1767 		 struct trace_event *event)
1768 {
1769 	struct bprint_entry *field;
1770 	struct trace_seq *s = &iter->seq;
1771 
1772 	trace_assign_type(field, iter->ent);
1773 
1774 	trace_seq_printf(s, ": %lx : ", field->ip);
1775 	trace_seq_bprintf(s, field->fmt, field->buf);
1776 
1777 	return trace_handle_return(s);
1778 }
1779 
/* Output callbacks for trace_bprintk() records. */
static struct trace_event_functions trace_bprint_funcs = {
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};

/* Registration record for TRACE_BPRINT entries. */
static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.funcs		= &trace_bprint_funcs,
};
1789 
1790 /* TRACE_PRINT */
1791 static enum print_line_t trace_print_print(struct trace_iterator *iter,
1792 					   int flags, struct trace_event *event)
1793 {
1794 	struct print_entry *field;
1795 	struct trace_seq *s = &iter->seq;
1796 	unsigned long ip;
1797 
1798 	trace_assign_type(field, iter->ent);
1799 
1800 	ip = trace_adjust_address(iter->tr, field->ip);
1801 
1802 	seq_print_ip_sym(s, ip, flags);
1803 	trace_seq_printf(s, ": %s", field->buf);
1804 
1805 	return trace_handle_return(s);
1806 }
1807 
1808 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
1809 					 struct trace_event *event)
1810 {
1811 	struct print_entry *field;
1812 
1813 	trace_assign_type(field, iter->ent);
1814 
1815 	trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
1816 
1817 	return trace_handle_return(&iter->seq);
1818 }
1819 
/* Output callbacks for trace_printk() records. */
static struct trace_event_functions trace_print_funcs = {
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};

/* Registration record for TRACE_PRINT entries. */
static struct trace_event trace_print_event = {
	.type	 	= TRACE_PRINT,
	.funcs		= &trace_print_funcs,
};
1829 
1830 static enum print_line_t trace_raw_data(struct trace_iterator *iter, int flags,
1831 					 struct trace_event *event)
1832 {
1833 	struct raw_data_entry *field;
1834 	int i;
1835 
1836 	trace_assign_type(field, iter->ent);
1837 
1838 	trace_seq_printf(&iter->seq, "# %x buf:", field->id);
1839 
1840 	for (i = 0; i < iter->ent_size - offsetof(struct raw_data_entry, buf); i++)
1841 		trace_seq_printf(&iter->seq, " %02x",
1842 				 (unsigned char)field->buf[i]);
1843 
1844 	trace_seq_putc(&iter->seq, '\n');
1845 
1846 	return trace_handle_return(&iter->seq);
1847 }
1848 
/* Raw-data entries print the same hex dump in both text and raw mode. */
static struct trace_event_functions trace_raw_data_funcs = {
	.trace		= trace_raw_data,
	.raw		= trace_raw_data,
};

/* Registration record for TRACE_RAW_DATA entries. */
static struct trace_event trace_raw_data_event = {
	.type	 	= TRACE_RAW_DATA,
	.funcs		= &trace_raw_data_funcs,
};
1858 
1859 static enum print_line_t
1860 trace_func_repeats_raw(struct trace_iterator *iter, int flags,
1861 			 struct trace_event *event)
1862 {
1863 	struct func_repeats_entry *field;
1864 	struct trace_seq *s = &iter->seq;
1865 
1866 	trace_assign_type(field, iter->ent);
1867 
1868 	trace_seq_printf(s, "%lu %lu %u %llu\n",
1869 			 field->ip,
1870 			 field->parent_ip,
1871 			 field->count,
1872 			 FUNC_REPEATS_GET_DELTA_TS(field));
1873 
1874 	return trace_handle_return(s);
1875 }
1876 
1877 static enum print_line_t
1878 trace_func_repeats_print(struct trace_iterator *iter, int flags,
1879 			 struct trace_event *event)
1880 {
1881 	struct func_repeats_entry *field;
1882 	struct trace_seq *s = &iter->seq;
1883 
1884 	trace_assign_type(field, iter->ent);
1885 
1886 	print_fn_trace(s, field->ip, field->parent_ip, NULL, iter->tr, flags);
1887 	trace_seq_printf(s, " (repeats: %u, last_ts:", field->count);
1888 	trace_print_time(s, iter,
1889 			 iter->ts - FUNC_REPEATS_GET_DELTA_TS(field));
1890 	trace_seq_puts(s, ")\n");
1891 
1892 	return trace_handle_return(s);
1893 }
1894 
/* Output callbacks for repeated-function records. */
static struct trace_event_functions trace_func_repeats_funcs = {
	.trace		= trace_func_repeats_print,
	.raw		= trace_func_repeats_raw,
};

/* Registration record for TRACE_FUNC_REPEATS entries. */
static struct trace_event trace_func_repeats_event = {
	.type	 	= TRACE_FUNC_REPEATS,
	.funcs		= &trace_func_repeats_funcs,
};
1904 
/* Built-in ftrace entry printers, registered once at boot. */
static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bputs_event,
	&trace_bprint_event,
	&trace_print_event,
	&trace_hwlat_event,
	&trace_osnoise_event,
	&trace_timerlat_event,
	&trace_raw_data_event,
	&trace_func_repeats_event,
	NULL	/* sentinel for init_events() */
};
1921 
1922 __init int init_events(void)
1923 {
1924 	struct trace_event *event;
1925 	int i, ret;
1926 
1927 	for (i = 0; events[i]; i++) {
1928 		event = events[i];
1929 		ret = register_trace_event(event);
1930 		WARN_ONCE(!ret, "event %d failed to register", event->type);
1931 	}
1932 
1933 	return 0;
1934 }
1935