xref: /linux/tools/perf/util/event.c (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 #include <errno.h>
2 #include <fcntl.h>
3 #include <inttypes.h>
4 #include <linux/compiler.h>
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <perf/cpumap.h>
8 #include <perf/event.h>
9 #include <stdio.h>
10 #include <sys/types.h>
11 #include <sys/stat.h>
12 #include <unistd.h>
13 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
14 #include <linux/perf_event.h>
15 #include "cpumap.h"
16 #include "dso.h"
17 #include "event.h"
18 #include "debug.h"
19 #include "hist.h"
20 #include "machine.h"
21 #include "sort.h"
22 #include "string2.h"
23 #include "strlist.h"
24 #include "thread.h"
25 #include "thread_map.h"
26 #include "time-utils.h"
27 #include <linux/ctype.h>
28 #include "map.h"
29 #include "util/namespaces.h"
30 #include "symbol.h"
31 #include "symbol/kallsyms.h"
32 #include "asm/bug.h"
33 #include "stat.h"
34 #include "session.h"
35 #include "bpf-event.h"
36 #include "print_binary.h"
37 #include "tool.h"
38 #include "util.h"
39 
/*
 * Printable names for each PERF_RECORD_* type, indexed by the numeric
 * event type (kernel records first, then perf's own synthetic user-space
 * record types).  Index 0 is used for the "TOTAL" row in event statistics.
 * Holes in the table are reported as "UNKNOWN" by perf_event__name().
 */
static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_AUX]			= "AUX",
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
	[PERF_RECORD_NAMESPACES]		= "NAMESPACES",
	[PERF_RECORD_KSYMBOL]			= "KSYMBOL",
	[PERF_RECORD_BPF_EVENT]			= "BPF_EVENT",
	[PERF_RECORD_CGROUP]			= "CGROUP",
	[PERF_RECORD_TEXT_POKE]			= "TEXT_POKE",
	[PERF_RECORD_AUX_OUTPUT_HW_ID]		= "AUX_OUTPUT_HW_ID",
	[PERF_RECORD_CALLCHAIN_DEFERRED]	= "CALLCHAIN_DEFERRED",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
	[PERF_RECORD_STAT]			= "STAT",
	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
	[PERF_RECORD_HEADER_FEATURE]		= "FEATURE",
	[PERF_RECORD_COMPRESSED]		= "COMPRESSED",
	[PERF_RECORD_FINISHED_INIT]		= "FINISHED_INIT",
	[PERF_RECORD_COMPRESSED2]		= "COMPRESSED2",
	[PERF_RECORD_BPF_METADATA]		= "BPF_METADATA",
	[PERF_RECORD_SCHEDSTAT_CPU]		= "SCHEDSTAT_CPU",
	[PERF_RECORD_SCHEDSTAT_DOMAIN]		= "SCHEDSTAT_DOMAIN",
};
88 
89 const char *perf_event__name(unsigned int id)
90 {
91 	if (id >= ARRAY_SIZE(perf_event__names))
92 		return "INVALID";
93 	if (!perf_event__names[id])
94 		return "UNKNOWN";
95 	return perf_event__names[id];
96 }
97 
/* kallsyms__parse() callback context: the symbol to look for and, on a
 * successful match, its resolved start address. */
struct process_symbol_args {
	const char *name;
	u64	   start;
};
102 
103 static int find_func_symbol_cb(void *arg, const char *name, char type,
104 			       u64 start)
105 {
106 	struct process_symbol_args *args = arg;
107 
108 	/*
109 	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
110 	 * an 'A' to the same address as "_stext".
111 	 */
112 	if (!(kallsyms__is_function(type) ||
113 	      type == 'A') || strcmp(name, args->name))
114 		return 0;
115 
116 	args->start = start;
117 	return 1;
118 }
119 
120 static int find_any_symbol_cb(void *arg, const char *name,
121 			      char type __maybe_unused, u64 start)
122 {
123 	struct process_symbol_args *args = arg;
124 
125 	if (strcmp(name, args->name))
126 		return 0;
127 
128 	args->start = start;
129 	return 1;
130 }
131 
132 int kallsyms__get_function_start(const char *kallsyms_filename,
133 				 const char *symbol_name, u64 *addr)
134 {
135 	struct process_symbol_args args = { .name = symbol_name, };
136 
137 	if (kallsyms__parse(kallsyms_filename, &args, find_func_symbol_cb) <= 0)
138 		return -1;
139 
140 	*addr = args.start;
141 	return 0;
142 }
143 
144 int kallsyms__get_symbol_start(const char *kallsyms_filename,
145 			       const char *symbol_name, u64 *addr)
146 {
147 	struct process_symbol_args args = { .name = symbol_name, };
148 
149 	if (kallsyms__parse(kallsyms_filename, &args, find_any_symbol_cb) <= 0)
150 		return -1;
151 
152 	*addr = args.start;
153 	return 0;
154 }
155 
/*
 * Decode a PERF_RECORD_STAT_CONFIG event into @config.  Each data
 * element is a (tag, val) pair; known tags are copied into the matching
 * config field, unknown tags only produce a warning so that files
 * written by newer tools remain readable.
 */
void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct perf_record_stat_config *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE,  aggr_mode)
		CASE(SCALE,      scale)
		CASE(INTERVAL,   interval)
		CASE(AGGR_LEVEL, aggr_level)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRI_lu64 "\n",
				   event->data[i].tag);
		}
	}
}
180 
181 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
182 {
183 	const char *s;
184 
185 	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
186 		s = " exec";
187 	else
188 		s = "";
189 
190 	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
191 }
192 
/*
 * Print a PERF_RECORD_NAMESPACES event: pid/tid, the namespace count,
 * then each namespace as "idx/name: dev/inode", four entries per line.
 * Returns the number of characters written.
 */
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
{
	size_t ret = 0;
	struct perf_ns_link_info *ns_link_info;
	u32 nr_namespaces, idx;

	ns_link_info = event->namespaces.link_info;
	nr_namespaces = event->namespaces.nr_namespaces;

	ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
		       event->namespaces.pid,
		       event->namespaces.tid,
		       nr_namespaces);

	for (idx = 0; idx < nr_namespaces; idx++) {
		/* Wrap to a new indented line every four entries. */
		if (idx && (idx % 4 == 0))
			ret += fprintf(fp, "\n\t\t ");

		ret  += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
				perf_ns__name(idx), (u64)ns_link_info[idx].dev,
				(u64)ns_link_info[idx].ino,
				((idx + 1) != nr_namespaces) ? ", " : "]\n");
	}

	return ret;
}
219 
/* Print a PERF_RECORD_CGROUP event (cgroup id and path). */
size_t perf_event__fprintf_cgroup(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " cgroup: %" PRI_lu64 " %s\n",
		       event->cgroup.id, event->cgroup.path);
}
225 
/* Delegate PERF_RECORD_COMM handling to the per-machine state. */
int perf_event__process_comm(const struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}
233 
/* Delegate PERF_RECORD_NAMESPACES handling to the per-machine state. */
int perf_event__process_namespaces(const struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	return machine__process_namespaces_event(machine, event, sample);
}
241 
/* Delegate PERF_RECORD_CGROUP handling to the per-machine state. */
int perf_event__process_cgroup(const struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	return machine__process_cgroup_event(machine, event, sample);
}
249 
/* Delegate PERF_RECORD_LOST handling to the per-machine state. */
int perf_event__process_lost(const struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}
257 
/* Delegate PERF_RECORD_AUX handling to the per-machine state. */
int perf_event__process_aux(const struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}
265 
/* Delegate PERF_RECORD_ITRACE_START handling to the per-machine state. */
int perf_event__process_itrace_start(const struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}
273 
/* Delegate PERF_RECORD_AUX_OUTPUT_HW_ID handling to the per-machine state. */
int perf_event__process_aux_output_hw_id(const struct perf_tool *tool __maybe_unused,
					 union perf_event *event,
					 struct perf_sample *sample __maybe_unused,
					 struct machine *machine)
{
	return machine__process_aux_output_hw_id_event(machine, event);
}
281 
/* Delegate PERF_RECORD_LOST_SAMPLES handling to the per-machine state. */
int perf_event__process_lost_samples(const struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}
289 
/* Delegate PERF_RECORD_SWITCH{,_CPU_WIDE} handling to the per-machine state. */
int perf_event__process_switch(const struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}
297 
/* Delegate PERF_RECORD_KSYMBOL handling to the per-machine state. */
int perf_event__process_ksymbol(const struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine)
{
	return machine__process_ksymbol(machine, event, sample);
}
305 
/* Delegate PERF_RECORD_BPF_EVENT handling to the per-machine state. */
int perf_event__process_bpf(const struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct machine *machine)
{
	return machine__process_bpf(machine, event, sample);
}
313 
/* Delegate PERF_RECORD_TEXT_POKE handling to the per-machine state. */
int perf_event__process_text_poke(const struct perf_tool *tool __maybe_unused,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct machine *machine)
{
	return machine__process_text_poke(machine, event, sample);
}
321 
/*
 * Print a PERF_RECORD_MMAP event: pid/tid, [start(len) @ pgoff],
 * 'r' for data mappings or 'x' for executable ones, and the filename.
 */
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}
330 
/*
 * Print a PERF_RECORD_MMAP2 event.  The record carries either a build-id
 * (when PERF_RECORD_MISC_MMAP_BUILD_ID is set) or the dev/inode tuple;
 * both variants show address range, page offset, rwx/shared protection
 * flags and the mapped filename.
 */
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		char sbuild_id[SBUILD_ID_SIZE];
		struct build_id bid;

		build_id__init(&bid, event->mmap2.build_id,
			       event->mmap2.build_id_size);
		build_id__snprintf(&bid, sbuild_id, sizeof(sbuild_id));

		return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64
				   " <%s>]: %c%c%c%c %s\n",
			       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
			       event->mmap2.len, event->mmap2.pgoff, sbuild_id,
			       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
			       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
			       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
			       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
			       event->mmap2.filename);
	} else {
		return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64
				   " %02x:%02x %"PRI_lu64" %"PRI_lu64"]: %c%c%c%c %s\n",
			       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
			       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
			       event->mmap2.min, event->mmap2.ino,
			       event->mmap2.ino_generation,
			       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
			       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
			       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
			       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
			       event->mmap2.filename);
	}
}
364 
365 size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
366 {
367 	struct perf_thread_map *threads = thread_map__new_event(&event->thread_map);
368 	size_t ret;
369 
370 	ret = fprintf(fp, " nr: ");
371 
372 	if (threads)
373 		ret += thread_map__fprintf(threads, fp);
374 	else
375 		ret += fprintf(fp, "failed to get threads from event\n");
376 
377 	perf_thread_map__put(threads);
378 	return ret;
379 }
380 
381 size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
382 {
383 	struct perf_cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
384 	size_t ret;
385 
386 	ret = fprintf(fp, ": ");
387 
388 	if (cpus)
389 		ret += cpu_map__fprintf(cpus, fp);
390 	else
391 		ret += fprintf(fp, "failed to get cpumap from event\n");
392 
393 	perf_cpu_map__put(cpus);
394 	return ret;
395 }
396 
/* Delegate PERF_RECORD_MMAP handling to the per-machine state. */
int perf_event__process_mmap(const struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}
404 
/* Delegate PERF_RECORD_MMAP2 handling to the per-machine state. */
int perf_event__process_mmap2(const struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}
412 
/* Print a PERF_RECORD_FORK/EXIT event as "(pid:tid):(ppid:ptid)". */
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}
419 
/* Delegate PERF_RECORD_FORK handling to the per-machine state. */
int perf_event__process_fork(const struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}
427 
/* Delegate PERF_RECORD_EXIT handling to the per-machine state. */
int perf_event__process_exit(const struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}
435 
/*
 * PERF_RECORD_EXIT handler that removes the exiting thread from the
 * machine immediately, for tools that do not want to keep dead threads
 * around.  Always returns 0.
 */
int perf_event__exit_del_thread(const struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->fork.pid,
							event->fork.tid);

	dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
		    event->fork.ppid, event->fork.ptid);

	if (thread) {
		machine__remove_thread(machine, thread);
		/* Drop the reference taken by machine__findnew_thread(). */
		thread__put(thread);
	}

	return 0;
}
455 
/*
 * Print a PERF_RECORD_AUX event: offset, size, raw flags, plus a
 * letter per set flag (Truncated/Overwrite/Partial/Collision).
 */
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRI_lx64" size: %#"PRI_lx64" flags: %#"PRI_lx64" [%s%s%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
		       event->aux.flags & PERF_AUX_FLAG_PARTIAL   ? "P" : "",
		       event->aux.flags & PERF_AUX_FLAG_COLLISION ? "C" : "");
}
466 
/* Print a PERF_RECORD_ITRACE_START event (pid and tid). */
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}
472 
/* Print a PERF_RECORD_AUX_OUTPUT_HW_ID event (hardware id in hex). */
size_t perf_event__fprintf_aux_output_hw_id(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " hw_id: %#"PRI_lx64"\n",
		       event->aux_output_hw_id.hw_id);
}
478 
479 size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
480 {
481 	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
482 	const char *in_out = !out ? "IN         " :
483 		!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
484 				    "OUT        " : "OUT preempt";
485 
486 	if (event->header.type == PERF_RECORD_SWITCH)
487 		return fprintf(fp, " %s\n", in_out);
488 
489 	return fprintf(fp, " %s  %s pid/tid: %5d/%-5d\n",
490 		       in_out, out ? "next" : "prev",
491 		       event->context_switch.next_prev_pid,
492 		       event->context_switch.next_prev_tid);
493 }
494 
/* Print a PERF_RECORD_LOST event (number of lost records). */
static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " lost %" PRI_lu64 "\n", event->lost.lost);
}
499 
/* Print a PERF_RECORD_KSYMBOL event (addr, len, type, flags, name). */
size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " addr %" PRI_lx64 " len %u type %u flags 0x%x name %s\n",
		       event->ksymbol.addr, event->ksymbol.len,
		       event->ksymbol.ksym_type,
		       event->ksymbol.flags, event->ksymbol.name);
}
507 
/* Print a PERF_RECORD_BPF_EVENT event (type, flags, prog id). */
size_t perf_event__fprintf_bpf(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " type %u, flags %u, id %u\n",
		       event->bpf.type, event->bpf.flags, event->bpf.id);
}
513 
514 size_t perf_event__fprintf_bpf_metadata(union perf_event *event, FILE *fp)
515 {
516 	struct perf_record_bpf_metadata *metadata = &event->bpf_metadata;
517 	size_t ret;
518 
519 	ret = fprintf(fp, " prog %s\n", metadata->prog_name);
520 	for (__u32 i = 0; i < metadata->nr_entries; i++) {
521 		ret += fprintf(fp, "  entry %d: %20s = %s\n", i,
522 			       metadata->entries[i].key,
523 			       metadata->entries[i].value);
524 	}
525 	return ret;
526 }
527 
528 static int text_poke_printer(enum binary_printer_ops op, unsigned int val,
529 			     void *extra, FILE *fp)
530 {
531 	bool old = *(bool *)extra;
532 
533 	switch ((int)op) {
534 	case BINARY_PRINT_LINE_BEGIN:
535 		return fprintf(fp, "            %s bytes:", old ? "Old" : "New");
536 	case BINARY_PRINT_NUM_DATA:
537 		return fprintf(fp, " %02x", val);
538 	case BINARY_PRINT_LINE_END:
539 		return fprintf(fp, "\n");
540 	default:
541 		return 0;
542 	}
543 }
544 
/*
 * Print a PERF_RECORD_TEXT_POKE event: the poked address (resolved to a
 * kernel symbol+offset when @machine is available), the old/new lengths,
 * and hex dumps of the old and new byte sequences.
 */
size_t perf_event__fprintf_text_poke(union perf_event *event, struct machine *machine, FILE *fp)
{
	struct perf_record_text_poke_event *tp = &event->text_poke;
	size_t ret;
	bool old;

	ret = fprintf(fp, " %" PRI_lx64 " ", tp->addr);
	if (machine) {
		struct addr_location al;

		addr_location__init(&al);
		al.map = maps__find(machine__kernel_maps(machine), tp->addr);
		if (al.map && map__load(al.map) >= 0) {
			al.addr = map__map_ip(al.map, tp->addr);
			al.sym = map__find_symbol(al.map, al.addr);
			if (al.sym)
				ret += symbol__fprintf_symname_offs(al.sym, &al, fp);
		}
		addr_location__exit(&al);
	}
	ret += fprintf(fp, " old len %u new len %u\n", tp->old_len, tp->new_len);
	/* Old bytes precede the new bytes in tp->bytes. */
	old = true;
	ret += binary__fprintf(tp->bytes, tp->old_len, 16, text_poke_printer,
			       &old, fp);
	old = false;
	ret += binary__fprintf(tp->bytes + tp->old_len, tp->new_len, 16,
			       text_poke_printer, &old, fp);
	return ret;
}
574 
/*
 * Print a PERF_RECORD_SCHEDSTAT_CPU event.  The per-version schedstat
 * headers expand CPU_FIELD once per field, so including them inside the
 * version branches emits every field of the matching /proc/schedstat
 * layout.  Unknown versions print a diagnostic instead.
 */
size_t perf_event__fprintf_schedstat_cpu(union perf_event *event, FILE *fp)
{
	struct perf_record_schedstat_cpu *cs = &event->schedstat_cpu;
	size_t size = fprintf(fp, "\ncpu%u ", cs->cpu);
	__u16 version = cs->version;

#define CPU_FIELD(_type, _name, _desc, _format, _is_pct, _pct_of, _ver)		\
	size += fprintf(fp, "%" PRIu64 " ", (uint64_t)cs->_ver._name)

	if (version == 15) {
#include <perf/schedstat-v15.h>
		return size;
	} else if (version == 16) {
#include <perf/schedstat-v16.h>
		return size;
	} else if (version == 17) {
#include <perf/schedstat-v17.h>
		return size;
	}
#undef CPU_FIELD

	return fprintf(fp, "Unsupported /proc/schedstat version %d.\n",
		       event->schedstat_cpu.version);
}
599 
/*
 * Print a PERF_RECORD_SCHEDSTAT_DOMAIN event.  Mirrors
 * perf_event__fprintf_schedstat_cpu(): the per-version schedstat headers
 * expand DOMAIN_FIELD once per field of the matching /proc/schedstat
 * layout.  Unknown versions print a diagnostic instead.
 */
size_t perf_event__fprintf_schedstat_domain(union perf_event *event, FILE *fp)
{
	struct perf_record_schedstat_domain *ds = &event->schedstat_domain;
	__u16 version = ds->version;
	size_t size = fprintf(fp, "\ndomain%u ", ds->domain);

#define DOMAIN_FIELD(_type, _name, _desc, _format, _is_jiffies, _ver)		\
	size += fprintf(fp, "%" PRIu64 " ", (uint64_t)ds->_ver._name)

	if (version == 15) {
#include <perf/schedstat-v15.h>
		return size;
	} else if (version == 16) {
#include <perf/schedstat-v16.h>
		return size;
	} else if (version == 17) {
#include <perf/schedstat-v17.h>
		return size;
	}
#undef DOMAIN_FIELD

	return fprintf(fp, "Unsupported /proc/schedstat version %d.\n",
		       event->schedstat_domain.version);
}
624 
/*
 * Print any perf event: the "PERF_RECORD_<name>" prefix followed by a
 * type-specific body when a printer exists for it, or just a newline
 * otherwise.  @machine is only used for PERF_RECORD_TEXT_POKE symbol
 * resolution and may be NULL.  Returns the number of characters written.
 */
size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_NAMESPACES:
		ret += perf_event__fprintf_namespaces(event, fp);
		break;
	case PERF_RECORD_CGROUP:
		ret += perf_event__fprintf_cgroup(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	case PERF_RECORD_LOST:
		ret += perf_event__fprintf_lost(event, fp);
		break;
	case PERF_RECORD_KSYMBOL:
		ret += perf_event__fprintf_ksymbol(event, fp);
		break;
	case PERF_RECORD_BPF_EVENT:
		ret += perf_event__fprintf_bpf(event, fp);
		break;
	case PERF_RECORD_TEXT_POKE:
		ret += perf_event__fprintf_text_poke(event, machine, fp);
		break;
	case PERF_RECORD_AUX_OUTPUT_HW_ID:
		ret += perf_event__fprintf_aux_output_hw_id(event, fp);
		break;
	case PERF_RECORD_BPF_METADATA:
		ret += perf_event__fprintf_bpf_metadata(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}
684 
/* Generic event handler: let the machine dispatch on the event type. */
int perf_event__process(const struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}
692 
/*
 * Resolve @addr within @thread's address space, filling in @al.
 *
 * Side effects on @al: releases any previously held map/thread
 * references, takes a new reference on @thread, sets cpumode/filtered/
 * level, and on a successful lookup rewrites al->addr from an absolute
 * address to a map-relative one via map__map_ip().
 *
 * Returns the found map (also stored in al->map), or NULL when there is
 * no machine, no matching map, or the cpumode is filtered out.
 */
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
			     struct addr_location *al)
{
	struct maps *maps = thread__maps(thread);
	struct machine *machine = maps__machine(maps);
	bool load_map = false;

	/* Drop references left over from a previous resolution of @al. */
	map__zput(al->map);
	thread__zput(al->thread);
	al->thread = thread__get(thread);

	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL)
		return NULL;

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		maps = machine__kernel_maps(machine);
		load_map = !symbol_conf.lazy_load_kernel_maps;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		maps = machine__kernel_maps(machine);
		load_map = !symbol_conf.lazy_load_kernel_maps;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		/* Hypervisor, or a cpumode this session is not set up for. */
		al->level = 'H';

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return NULL;
	}
	al->map = maps__find(maps, al->addr);
	if (al->map != NULL) {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map);
		al->addr = map__map_ip(al->map, al->addr);
	}

	return al->map;
}
750 
751 /*
752  * For branch stacks or branch samples, the sample cpumode might not be correct
753  * because it applies only to the sample 'ip' and not necessary to 'addr' or
754  * branch stack addresses. If possible, use a fallback to deal with those cases.
755  */
struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
				struct addr_location *al)
{
	struct map *map = thread__find_map(thread, cpumode, addr, al);
	struct machine *machine = maps__machine(thread__maps(thread));
	/* Ask the machine which cpumode @addr itself falls into. */
	u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);

	if (map || addr_cpumode == cpumode)
		return map;

	/* Retry with the cpumode derived from the address itself. */
	return thread__find_map(thread, addr_cpumode, addr, al);
}
768 
769 struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
770 				   u64 addr, struct addr_location *al)
771 {
772 	al->sym = NULL;
773 	if (thread__find_map(thread, cpumode, addr, al))
774 		al->sym = map__find_symbol(al->map, al->addr);
775 	return al->sym;
776 }
777 
778 struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
779 				      u64 addr, struct addr_location *al)
780 {
781 	al->sym = NULL;
782 	if (thread__find_map_fb(thread, cpumode, addr, al))
783 		al->sym = map__find_symbol(al->map, al->addr);
784 	return al->sym;
785 }
786 
787 static bool check_address_range(struct intlist *addr_list, int addr_range,
788 				unsigned long addr)
789 {
790 	struct int_node *pos;
791 
792 	intlist__for_each_entry(pos, addr_list) {
793 		if (addr >= pos->i && addr < pos->i + addr_range)
794 			return true;
795 	}
796 
797 	return false;
798 }
799 
800 /*
801  * Callers need to drop the reference to al->thread, obtained in
802  * machine__findnew_thread()
803  */
/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread()
 *
 * Resolve @sample (ip, pid/tid, cpu) against @machine into @al:
 * thread, map, symbol, socket, parallelism/latency, plus the
 * HIST_FILTER__* bits requested via symbol_conf (thread/dso/symbol/
 * parallelism lists).  Returns 0 on success, -1 when the thread cannot
 * be created.
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample)
{
	struct thread *thread;
	struct dso *dso;

	if (symbol_conf.guest_code && !machine__is_host(machine))
		thread = machine__findnew_guest_code(machine, sample->pid);
	else
		thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread__tid(thread));
	thread__find_map(thread, sample->cpumode, sample->ip, al);
	dso = al->map ? map__dso(al->map) : NULL;
	dump_printf(" ...... dso: %s\n",
		dso
		? dso__long_name(dso)
		: (al->level == 'H' ? "[hypervisor]" : "<not found>"));

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	/*
	 * al->thread (set by thread__find_map() above) keeps its own
	 * reference; drop ours.
	 */
	thread__put(thread);
	thread = NULL;

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;
	al->srcline = NULL;

	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	/* Account for possible out-of-order switch events. */
	al->parallelism = max(1, min(machine->parallelism, machine__nr_cpus_avail(machine)));
	if (test_bit(al->parallelism, symbol_conf.parallelism_filter))
		al->filtered |= (1 << HIST_FILTER__PARALLELISM);
	/*
	 * Multiply it by some const to avoid precision loss or dealing
	 * with floats. The multiplier does not matter otherwise since
	 * we only print it as percents.
	 */
	al->latency = sample->period * 1000 / al->parallelism;

	if (al->map) {
		/* DSO filter: match either the short or the long name. */
		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso__short_name(dso)) ||
			       (dso__short_name(dso) != dso__long_name(dso) &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso__long_name(dso)))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr);
	} else if (symbol_conf.dso_list) {
		al->filtered |= (1 << HIST_FILTER__DSO);
	}

	if (symbol_conf.sym_list) {
		int ret = 0;
		char al_addr_str[32];
		size_t sz = sizeof(al_addr_str);

		/* Symbol filter matches by name, by start address string,
		 * or by an address (range) list. */
		if (al->sym) {
			ret = strlist__has_entry(symbol_conf.sym_list,
						al->sym->name);
		}
		if (!ret && al->sym) {
			snprintf(al_addr_str, sz, "0x%"PRIx64,
				 map__unmap_ip(al->map, al->sym->start));
			ret = strlist__has_entry(symbol_conf.sym_list,
						al_addr_str);
		}
		if (!ret && symbol_conf.addr_list && al->map) {
			unsigned long addr = map__unmap_ip(al->map, al->addr);

			ret = intlist__has_entry(symbol_conf.addr_list, addr);
			if (!ret && symbol_conf.addr_range) {
				ret = check_address_range(symbol_conf.addr_list,
							  symbol_conf.addr_range,
							  addr);
			}
		}

		if (!ret)
			al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}
901 
/*
 * Heuristic for a BTS-style branch-tracing event: a hardware event with
 * sample_period == 1 whose config has the BRANCH_INSTRUCTIONS bit set.
 * NOTE(review): this is a bitwise test (&), not an equality test, so any
 * config sharing that bit matches — kept as-is to preserve behavior.
 */
bool is_bts_event(struct perf_event_attr *attr)
{
	if (attr->type != PERF_TYPE_HARDWARE)
		return false;
	if (!(attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS))
		return false;

	return attr->sample_period == 1;
}
908 
/*
 * Return true when sample->addr of this event type refers to a code or
 * data location that can meaningfully be resolved to a symbol: software
 * page-fault events and BTS-style branch events.
 */
bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE) {
		switch (attr->config) {
		case PERF_COUNT_SW_PAGE_FAULTS:
		case PERF_COUNT_SW_PAGE_FAULTS_MIN:
		case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
			return true;
		default:
			break;
		}
	}

	return is_bts_event(attr);
}
922 
/*
 * Resolve sample->addr (not sample->ip) within @thread into @al, using
 * the cpumode-fallback map lookup since the sample cpumode need not
 * apply to the addr.  Sets al->cpu and al->sym (NULL when unresolved).
 */
void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample)
{
	thread__find_map_fb(thread, sample->cpumode, sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr);
}
934