1 // SPDX-License-Identifier: GPL-2.0
2 #include "builtin.h"
3
4 #include "util/counts.h"
5 #include "util/debug.h"
6 #include "util/dso.h"
7 #include <subcmd/exec-cmd.h>
8 #include "util/header.h"
9 #include <subcmd/parse-options.h>
10 #include "util/perf_regs.h"
11 #include "util/session.h"
12 #include "util/tool.h"
13 #include "util/map.h"
14 #include "util/srcline.h"
15 #include "util/symbol.h"
16 #include "util/thread.h"
17 #include "util/trace-event.h"
18 #include "util/env.h"
19 #include "util/evlist.h"
20 #include "util/evsel.h"
21 #include "util/evsel_fprintf.h"
22 #include "util/evswitch.h"
23 #include "util/sort.h"
24 #include "util/data.h"
25 #include "util/auxtrace.h"
26 #include "util/cpumap.h"
27 #include "util/thread_map.h"
28 #include "util/stat.h"
29 #include "util/color.h"
30 #include "util/string2.h"
31 #include "util/thread-stack.h"
32 #include "util/time-utils.h"
33 #include "util/path.h"
34 #include "util/event.h"
35 #include "util/mem-info.h"
36 #include "util/metricgroup.h"
37 #include "ui/ui.h"
38 #include "print_binary.h"
39 #include "print_insn.h"
40 #include <linux/bitmap.h>
41 #include <linux/compiler.h>
42 #include <linux/kernel.h>
43 #include <linux/stringify.h>
44 #include <linux/time64.h>
45 #include <linux/zalloc.h>
46 #include <linux/unaligned.h>
47 #include <sys/utsname.h>
48 #include "asm/bug.h"
49 #include "util/mem-events.h"
50 #include "util/dump-insn.h"
51 #include <dirent.h>
52 #include <errno.h>
53 #include <inttypes.h>
54 #include <signal.h>
55 #include <stdio.h>
56 #include <sys/param.h>
57 #include <sys/types.h>
58 #include <sys/stat.h>
59 #include <fcntl.h>
60 #include <unistd.h>
61 #include <subcmd/pager.h>
62 #include <perf/evlist.h>
63 #include <linux/err.h>
64 #include "util/dlfilter.h"
65 #include "util/record.h"
66 #include "util/util.h"
67 #include "util/cgroup.h"
68 #include "util/annotate.h"
69 #include "perf.h"
70
71 #include <linux/ctype.h>
72 #ifdef HAVE_LIBTRACEEVENT
73 #include <event-parse.h>
74 #endif
75
76 static char const *script_name;
77 static char const *generate_script_lang;
78 static bool reltime;
79 static bool deltatime;
80 static u64 initial_time;
81 static u64 previous_time;
82 static bool debug_mode;
83 static u64 last_timestamp;
84 static u64 nr_unordered;
85 static bool no_callchain;
86 static bool latency_format;
87 static bool system_wide;
88 static bool print_flags;
89 static const char *cpu_list;
90 static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
91 static int max_blocks;
92 static struct dlfilter *dlfilter;
93 static int dlargc;
94 static char **dlargv;
95
96 enum perf_output_field {
97 PERF_OUTPUT_COMM = 1ULL << 0,
98 PERF_OUTPUT_TID = 1ULL << 1,
99 PERF_OUTPUT_PID = 1ULL << 2,
100 PERF_OUTPUT_TIME = 1ULL << 3,
101 PERF_OUTPUT_CPU = 1ULL << 4,
102 PERF_OUTPUT_EVNAME = 1ULL << 5,
103 PERF_OUTPUT_TRACE = 1ULL << 6,
104 PERF_OUTPUT_IP = 1ULL << 7,
105 PERF_OUTPUT_SYM = 1ULL << 8,
106 PERF_OUTPUT_DSO = 1ULL << 9,
107 PERF_OUTPUT_ADDR = 1ULL << 10,
108 PERF_OUTPUT_SYMOFFSET = 1ULL << 11,
109 PERF_OUTPUT_SRCLINE = 1ULL << 12,
110 PERF_OUTPUT_PERIOD = 1ULL << 13,
111 PERF_OUTPUT_IREGS = 1ULL << 14,
112 PERF_OUTPUT_BRSTACK = 1ULL << 15,
113 PERF_OUTPUT_BRSTACKSYM = 1ULL << 16,
114 PERF_OUTPUT_DATA_SRC = 1ULL << 17,
115 PERF_OUTPUT_WEIGHT = 1ULL << 18,
116 PERF_OUTPUT_BPF_OUTPUT = 1ULL << 19,
117 PERF_OUTPUT_CALLINDENT = 1ULL << 20,
118 PERF_OUTPUT_INSN = 1ULL << 21,
119 PERF_OUTPUT_INSNLEN = 1ULL << 22,
120 PERF_OUTPUT_BRSTACKINSN = 1ULL << 23,
121 PERF_OUTPUT_BRSTACKOFF = 1ULL << 24,
122 PERF_OUTPUT_SYNTH = 1ULL << 25,
123 PERF_OUTPUT_PHYS_ADDR = 1ULL << 26,
124 PERF_OUTPUT_UREGS = 1ULL << 27,
125 PERF_OUTPUT_METRIC = 1ULL << 28,
126 PERF_OUTPUT_MISC = 1ULL << 29,
127 PERF_OUTPUT_SRCCODE = 1ULL << 30,
128 PERF_OUTPUT_IPC = 1ULL << 31,
129 PERF_OUTPUT_TOD = 1ULL << 32,
130 PERF_OUTPUT_DATA_PAGE_SIZE = 1ULL << 33,
131 PERF_OUTPUT_CODE_PAGE_SIZE = 1ULL << 34,
132 PERF_OUTPUT_INS_LAT = 1ULL << 35,
133 PERF_OUTPUT_BRSTACKINSNLEN = 1ULL << 36,
134 PERF_OUTPUT_MACHINE_PID = 1ULL << 37,
135 PERF_OUTPUT_VCPU = 1ULL << 38,
136 PERF_OUTPUT_CGROUP = 1ULL << 39,
137 PERF_OUTPUT_RETIRE_LAT = 1ULL << 40,
138 PERF_OUTPUT_DSOFF = 1ULL << 41,
139 PERF_OUTPUT_DISASM = 1ULL << 42,
140 PERF_OUTPUT_BRSTACKDISASM = 1ULL << 43,
141 PERF_OUTPUT_BRCNTR = 1ULL << 44,
142 };
143
144 struct perf_script {
145 struct perf_tool tool;
146 struct perf_session *session;
147 bool show_task_events;
148 bool show_mmap_events;
149 bool show_switch_events;
150 bool show_namespace_events;
151 bool show_lost_events;
152 bool show_round_events;
153 bool show_bpf_events;
154 bool show_cgroup_events;
155 bool show_text_poke_events;
156 bool allocated;
157 bool per_event_dump;
158 bool stitch_lbr;
159 struct evswitch evswitch;
160 struct perf_cpu_map *cpus;
161 struct perf_thread_map *threads;
162 int name_width;
163 const char *time_str;
164 struct perf_time_interval *ptime_range;
165 int range_size;
166 int range_num;
167 };
168
169 struct output_option {
170 const char *str;
171 enum perf_output_field field;
172 } all_output_options[] = {
173 {.str = "comm", .field = PERF_OUTPUT_COMM},
174 {.str = "tid", .field = PERF_OUTPUT_TID},
175 {.str = "pid", .field = PERF_OUTPUT_PID},
176 {.str = "time", .field = PERF_OUTPUT_TIME},
177 {.str = "cpu", .field = PERF_OUTPUT_CPU},
178 {.str = "event", .field = PERF_OUTPUT_EVNAME},
179 {.str = "trace", .field = PERF_OUTPUT_TRACE},
180 {.str = "ip", .field = PERF_OUTPUT_IP},
181 {.str = "sym", .field = PERF_OUTPUT_SYM},
182 {.str = "dso", .field = PERF_OUTPUT_DSO},
183 {.str = "dsoff", .field = PERF_OUTPUT_DSOFF},
184 {.str = "addr", .field = PERF_OUTPUT_ADDR},
185 {.str = "symoff", .field = PERF_OUTPUT_SYMOFFSET},
186 {.str = "srcline", .field = PERF_OUTPUT_SRCLINE},
187 {.str = "period", .field = PERF_OUTPUT_PERIOD},
188 {.str = "iregs", .field = PERF_OUTPUT_IREGS},
189 {.str = "uregs", .field = PERF_OUTPUT_UREGS},
190 {.str = "brstack", .field = PERF_OUTPUT_BRSTACK},
191 {.str = "brstacksym", .field = PERF_OUTPUT_BRSTACKSYM},
192 {.str = "data_src", .field = PERF_OUTPUT_DATA_SRC},
193 {.str = "weight", .field = PERF_OUTPUT_WEIGHT},
194 {.str = "bpf-output", .field = PERF_OUTPUT_BPF_OUTPUT},
195 {.str = "callindent", .field = PERF_OUTPUT_CALLINDENT},
196 {.str = "insn", .field = PERF_OUTPUT_INSN},
197 {.str = "disasm", .field = PERF_OUTPUT_DISASM},
198 {.str = "insnlen", .field = PERF_OUTPUT_INSNLEN},
199 {.str = "brstackinsn", .field = PERF_OUTPUT_BRSTACKINSN},
200 {.str = "brstackoff", .field = PERF_OUTPUT_BRSTACKOFF},
201 {.str = "synth", .field = PERF_OUTPUT_SYNTH},
202 {.str = "phys_addr", .field = PERF_OUTPUT_PHYS_ADDR},
203 {.str = "metric", .field = PERF_OUTPUT_METRIC},
204 {.str = "misc", .field = PERF_OUTPUT_MISC},
205 {.str = "srccode", .field = PERF_OUTPUT_SRCCODE},
206 {.str = "ipc", .field = PERF_OUTPUT_IPC},
207 {.str = "tod", .field = PERF_OUTPUT_TOD},
208 {.str = "data_page_size", .field = PERF_OUTPUT_DATA_PAGE_SIZE},
209 {.str = "code_page_size", .field = PERF_OUTPUT_CODE_PAGE_SIZE},
210 {.str = "ins_lat", .field = PERF_OUTPUT_INS_LAT},
211 {.str = "brstackinsnlen", .field = PERF_OUTPUT_BRSTACKINSNLEN},
212 {.str = "machine_pid", .field = PERF_OUTPUT_MACHINE_PID},
213 {.str = "vcpu", .field = PERF_OUTPUT_VCPU},
214 {.str = "cgroup", .field = PERF_OUTPUT_CGROUP},
215 {.str = "retire_lat", .field = PERF_OUTPUT_RETIRE_LAT},
216 {.str = "brstackdisasm", .field = PERF_OUTPUT_BRSTACKDISASM},
217 {.str = "brcntr", .field = PERF_OUTPUT_BRCNTR},
218 };
219
220 enum {
221 OUTPUT_TYPE_SYNTH = PERF_TYPE_MAX,
222 OUTPUT_TYPE_OTHER,
223 OUTPUT_TYPE_MAX
224 };
225
226 // We need to refactor the evsel->priv use in 'perf script' to allow for
227 // using that area, that is being used only in some cases.
228 #define OUTPUT_TYPE_UNSET -1
229
230 /* default set to maintain compatibility with current format */
231 static struct {
232 bool user_set;
233 bool wildcard_set;
234 unsigned int print_ip_opts;
235 u64 fields;
236 u64 invalid_fields;
237 u64 user_set_fields;
238 u64 user_unset_fields;
239 } output[OUTPUT_TYPE_MAX] = {
240
241 [PERF_TYPE_HARDWARE] = {
242 .user_set = false,
243
244 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
245 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
246 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
247 PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
248 PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
249
250 .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
251 },
252
253 [PERF_TYPE_SOFTWARE] = {
254 .user_set = false,
255
256 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
257 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
258 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
259 PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
260 PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD |
261 PERF_OUTPUT_BPF_OUTPUT,
262
263 .invalid_fields = PERF_OUTPUT_TRACE,
264 },
265
266 [PERF_TYPE_TRACEPOINT] = {
267 .user_set = false,
268
269 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
270 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
271 PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE
272 },
273
274 [PERF_TYPE_HW_CACHE] = {
275 .user_set = false,
276
277 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
278 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
279 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
280 PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
281 PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
282
283 .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
284 },
285
286 [PERF_TYPE_RAW] = {
287 .user_set = false,
288
289 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
290 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
291 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
292 PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
293 PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD |
294 PERF_OUTPUT_ADDR | PERF_OUTPUT_DATA_SRC |
295 PERF_OUTPUT_WEIGHT | PERF_OUTPUT_PHYS_ADDR |
296 PERF_OUTPUT_DATA_PAGE_SIZE | PERF_OUTPUT_CODE_PAGE_SIZE |
297 PERF_OUTPUT_INS_LAT | PERF_OUTPUT_RETIRE_LAT,
298
299 .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
300 },
301
302 [PERF_TYPE_BREAKPOINT] = {
303 .user_set = false,
304
305 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
306 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
307 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
308 PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
309 PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
310
311 .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
312 },
313
314 [OUTPUT_TYPE_SYNTH] = {
315 .user_set = false,
316
317 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
318 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
319 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
320 PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
321 PERF_OUTPUT_DSO | PERF_OUTPUT_SYNTH,
322
323 .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
324 },
325
326 [OUTPUT_TYPE_OTHER] = {
327 .user_set = false,
328
329 .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
330 PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
331 PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
332 PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
333 PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
334
335 .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
336 },
337 };
338
339 struct evsel_script {
340 char *filename;
341 FILE *fp;
342 u64 samples;
343 };
344
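/*
 * Per-event dump state (--per-event-dump): samples for this evsel are
 * written to a separate "<perf.data path>.<event name>.dump" file
 * instead of stdout.
 */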
static struct evsel_script *evsel_script__new(struct evsel *evsel, struct perf_data *data)
346 {
347 struct evsel_script *es = zalloc(sizeof(*es));
348
349 if (es != NULL) {
350 if (asprintf(&es->filename, "%s.%s.dump", data->file.path, evsel__name(evsel)) < 0)
351 goto out_free;
352 es->fp = fopen(es->filename, "w");
353 if (es->fp == NULL)
354 goto out_free_filename;
355 }
356
357 return es;
358 out_free_filename:
359 zfree(&es->filename);
360 out_free:
361 free(es);
362 return NULL;
363 }
364
static void evsel_script__delete(struct evsel_script *es)
366 {
367 zfree(&es->filename);
368 fclose(es->fp);
369 es->fp = NULL;
370 free(es);
371 }
372
static int evsel_script__fprintf(struct evsel_script *es, FILE *fp)
374 {
375 struct stat st;
376
377 fstat(fileno(es->fp), &st);
378 return fprintf(fp, "[ perf script: Wrote %.3f MB %s (%" PRIu64 " samples) ]\n",
379 st.st_size / 1024.0 / 1024.0, es->filename, es->samples);
380 }
381
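/*
 * Map a perf_event_attr::type to an index into the output[] table.
 * Synthesized events get their own slot; unknown types fall back to
 * OUTPUT_TYPE_OTHER.
 */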
static inline int output_type(unsigned int type)
383 {
384 switch (type) {
385 case PERF_TYPE_SYNTH:
386 return OUTPUT_TYPE_SYNTH;
387 default:
388 if (type < PERF_TYPE_MAX)
389 return type;
390 }
391
392 return OUTPUT_TYPE_OTHER;
393 }
394
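/*
 * Resolve and cache the output type of an evsel. Events of an unknown
 * type that come from a core PMU are treated like PERF_TYPE_RAW so they
 * pick up the hardware default field set.
 */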
static inline int evsel__output_type(struct evsel *evsel)
396 {
397 int type = evsel->script_output_type;
398
399 if (type == OUTPUT_TYPE_UNSET) {
400 type = output_type(evsel->core.attr.type);
401 if (type == OUTPUT_TYPE_OTHER) {
402 struct perf_pmu *pmu = evsel__find_pmu(evsel);
403
404 if (pmu && pmu->is_core)
405 type = PERF_TYPE_RAW;
406 }
407 evsel->script_output_type = type;
408 }
409
410 return type;
411 }
412
static bool output_set_by_user(void)
414 {
415 int j;
416 for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
417 if (output[j].user_set)
418 return true;
419 }
420 return false;
421 }
422
static const char *output_field2str(enum perf_output_field field)
424 {
425 int i, imax = ARRAY_SIZE(all_output_options);
426 const char *str = "";
427
428 for (i = 0; i < imax; ++i) {
429 if (all_output_options[i].field == field) {
430 str = all_output_options[i].str;
431 break;
432 }
433 }
434 return str;
435 }
436
437 #define PRINT_FIELD(x) (output[evsel__output_type(evsel)].fields & PERF_OUTPUT_##x)
438
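/*
 * Check that the sample_type bit a field depends on is present. If the
 * user asked for the field explicitly this is an error (unless
 * allow_user_set), otherwise the field is silently dropped from the
 * defaults.
 */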
static int evsel__do_check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg,
440 enum perf_output_field field, bool allow_user_set)
441 {
442 struct perf_event_attr *attr = &evsel->core.attr;
443 int type = evsel__output_type(evsel);
444 const char *evname;
445
446 if (attr->sample_type & sample_type)
447 return 0;
448
449 if (output[type].user_set_fields & field) {
450 if (allow_user_set)
451 return 0;
452 evname = evsel__name(evsel);
453 pr_err("Samples for '%s' event do not have %s attribute set. "
454 "Cannot print '%s' field.\n",
455 evname, sample_msg, output_field2str(field));
456 return -1;
457 }
458
459 /* user did not ask for it explicitly so remove from the default list */
460 output[type].fields &= ~field;
461 evname = evsel__name(evsel);
462 pr_debug("Samples for '%s' event do not have %s attribute set. "
463 "Skipping '%s' field.\n",
464 evname, sample_msg, output_field2str(field));
465
466 return 0;
467 }
468
static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg,
470 enum perf_output_field field)
471 {
472 return evsel__do_check_stype(evsel, sample_type, sample_msg, field, false);
473 }
474
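/* Validate that every requested output field has the sample data it needs. */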
static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
476 {
477 bool allow_user_set;
478
479 if (evsel__is_dummy_event(evsel))
480 return 0;
481
482 if (perf_header__has_feat(&session->header, HEADER_STAT))
483 return 0;
484
485 allow_user_set = perf_header__has_feat(&session->header,
486 HEADER_AUXTRACE);
487
488 if (PRINT_FIELD(TRACE) &&
489 !perf_session__has_traces(session, "record -R"))
490 return -EINVAL;
491
492 if (PRINT_FIELD(IP)) {
493 if (evsel__check_stype(evsel, PERF_SAMPLE_IP, "IP", PERF_OUTPUT_IP))
494 return -EINVAL;
495 }
496
497 if (PRINT_FIELD(ADDR) &&
498 evsel__do_check_stype(evsel, PERF_SAMPLE_ADDR, "ADDR", PERF_OUTPUT_ADDR, allow_user_set))
499 return -EINVAL;
500
501 if (PRINT_FIELD(DATA_SRC) &&
502 evsel__do_check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC, allow_user_set))
503 return -EINVAL;
504
505 if (PRINT_FIELD(WEIGHT) &&
506 evsel__do_check_stype(evsel, PERF_SAMPLE_WEIGHT_TYPE, "WEIGHT", PERF_OUTPUT_WEIGHT, allow_user_set))
507 return -EINVAL;
508
509 if (PRINT_FIELD(SYM) &&
510 !(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
511 pr_err("Display of symbols requested but neither sample IP nor "
512 "sample address\navailable. Hence, no addresses to convert "
513 "to symbols.\n");
514 return -EINVAL;
515 }
516 if (PRINT_FIELD(SYMOFFSET) && !PRINT_FIELD(SYM)) {
517 pr_err("Display of offsets requested but symbol is not"
518 "selected.\n");
519 return -EINVAL;
520 }
521 if (PRINT_FIELD(DSO) &&
522 !(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
523 pr_err("Display of DSO requested but no address to convert.\n");
524 return -EINVAL;
525 }
526 if ((PRINT_FIELD(SRCLINE) || PRINT_FIELD(SRCCODE)) && !PRINT_FIELD(IP)) {
527 pr_err("Display of source line number requested but sample IP is not\n"
528 "selected. Hence, no address to lookup the source line number.\n");
529 return -EINVAL;
530 }
531 if ((PRINT_FIELD(BRSTACKINSN) || PRINT_FIELD(BRSTACKINSNLEN) || PRINT_FIELD(BRSTACKDISASM))
532 && !allow_user_set &&
533 !(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY)) {
534 pr_err("Display of branch stack assembler requested, but non all-branch filter set\n"
535 "Hint: run 'perf record -b ...'\n");
536 return -EINVAL;
537 }
538 if (PRINT_FIELD(BRCNTR) &&
539 !(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_COUNTERS)) {
540 pr_err("Display of branch counter requested but it's not enabled\n"
541 "Hint: run 'perf record -j any,counter ...'\n");
542 return -EINVAL;
543 }
544 if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) &&
545 evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID", PERF_OUTPUT_TID|PERF_OUTPUT_PID))
546 return -EINVAL;
547
548 if (PRINT_FIELD(TIME) &&
549 evsel__check_stype(evsel, PERF_SAMPLE_TIME, "TIME", PERF_OUTPUT_TIME))
550 return -EINVAL;
551
552 if (PRINT_FIELD(CPU) &&
553 evsel__do_check_stype(evsel, PERF_SAMPLE_CPU, "CPU", PERF_OUTPUT_CPU, allow_user_set))
554 return -EINVAL;
555
556 if (PRINT_FIELD(IREGS) &&
557 evsel__do_check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS, allow_user_set))
558 return -EINVAL;
559
560 if (PRINT_FIELD(UREGS) &&
561 evsel__check_stype(evsel, PERF_SAMPLE_REGS_USER, "UREGS", PERF_OUTPUT_UREGS))
562 return -EINVAL;
563
564 if (PRINT_FIELD(PHYS_ADDR) &&
565 evsel__do_check_stype(evsel, PERF_SAMPLE_PHYS_ADDR, "PHYS_ADDR", PERF_OUTPUT_PHYS_ADDR, allow_user_set))
566 return -EINVAL;
567
568 if (PRINT_FIELD(DATA_PAGE_SIZE) &&
569 evsel__check_stype(evsel, PERF_SAMPLE_DATA_PAGE_SIZE, "DATA_PAGE_SIZE", PERF_OUTPUT_DATA_PAGE_SIZE))
570 return -EINVAL;
571
572 if (PRINT_FIELD(CODE_PAGE_SIZE) &&
573 evsel__check_stype(evsel, PERF_SAMPLE_CODE_PAGE_SIZE, "CODE_PAGE_SIZE", PERF_OUTPUT_CODE_PAGE_SIZE))
574 return -EINVAL;
575
576 if (PRINT_FIELD(INS_LAT) &&
577 evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT_STRUCT, "WEIGHT_STRUCT", PERF_OUTPUT_INS_LAT))
578 return -EINVAL;
579
580 if (PRINT_FIELD(CGROUP) &&
581 evsel__check_stype(evsel, PERF_SAMPLE_CGROUP, "CGROUP", PERF_OUTPUT_CGROUP)) {
582 pr_err("Hint: run 'perf record --all-cgroups ...'\n");
583 return -EINVAL;
584 }
585
586 if (PRINT_FIELD(RETIRE_LAT) &&
587 evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT_STRUCT, "WEIGHT_STRUCT", PERF_OUTPUT_RETIRE_LAT))
588 return -EINVAL;
589
590 return 0;
591 }
592
static void evsel__set_print_ip_opts(struct evsel *evsel)
594 {
595 unsigned int type = evsel__output_type(evsel);
596
597 output[type].print_ip_opts = 0;
598 if (PRINT_FIELD(IP))
599 output[type].print_ip_opts |= EVSEL__PRINT_IP;
600
601 if (PRINT_FIELD(SYM))
602 output[type].print_ip_opts |= EVSEL__PRINT_SYM;
603
604 if (PRINT_FIELD(DSO))
605 output[type].print_ip_opts |= EVSEL__PRINT_DSO;
606
607 if (PRINT_FIELD(DSOFF))
608 output[type].print_ip_opts |= EVSEL__PRINT_DSOFF;
609
610 if (PRINT_FIELD(SYMOFFSET))
611 output[type].print_ip_opts |= EVSEL__PRINT_SYMOFFSET;
612
613 if (PRINT_FIELD(SRCLINE))
614 output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE;
615 }
616
static struct evsel *find_first_output_type(struct evlist *evlist,
618 unsigned int type)
619 {
620 struct evsel *evsel;
621
622 evlist__for_each_entry(evlist, evsel) {
623 if (evsel__is_dummy_event(evsel))
624 continue;
625 if (evsel__output_type(evsel) == (int)type)
626 return evsel;
627 }
628 return NULL;
629 }
630
631 /*
632 * verify all user requested events exist and the samples
633 * have the expected data
634 */
static int perf_session__check_output_opt(struct perf_session *session)
636 {
637 bool tod = false;
638 unsigned int j;
639 struct evsel *evsel;
640
641 for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
642 evsel = find_first_output_type(session->evlist, j);
643
644 /*
645 * even if fields is set to 0 (ie., show nothing) event must
646 * exist if user explicitly includes it on the command line
647 */
648 if (!evsel && output[j].user_set && !output[j].wildcard_set &&
649 j != OUTPUT_TYPE_SYNTH) {
650 pr_err("%s events do not exist. "
651 "Remove corresponding -F option to proceed.\n",
652 event_type(j));
653 return -1;
654 }
655
656 if (evsel && output[j].fields &&
657 evsel__check_attr(evsel, session))
658 return -1;
659
660 if (evsel == NULL)
661 continue;
662
/* 'dsoff' implies 'dso' field */
664 if (output[j].fields & PERF_OUTPUT_DSOFF)
665 output[j].fields |= PERF_OUTPUT_DSO;
666
667 evsel__set_print_ip_opts(evsel);
668 tod |= output[j].fields & PERF_OUTPUT_TOD;
669 }
670
671 if (!no_callchain) {
672 bool use_callchain = false;
673 bool not_pipe = false;
674
675 evlist__for_each_entry(session->evlist, evsel) {
676 not_pipe = true;
677 if (evsel__has_callchain(evsel) || evsel__is_offcpu_event(evsel)) {
678 use_callchain = true;
679 break;
680 }
681 }
682 if (not_pipe && !use_callchain)
683 symbol_conf.use_callchain = false;
684 }
685
686 /*
687 * set default for tracepoints to print symbols only
688 * if callchains are present
689 */
690 if (symbol_conf.use_callchain &&
691 !output[PERF_TYPE_TRACEPOINT].user_set) {
692 j = PERF_TYPE_TRACEPOINT;
693
694 evlist__for_each_entry(session->evlist, evsel) {
695 if (evsel->core.attr.type != j)
696 continue;
697
698 if (evsel__has_callchain(evsel)) {
699 output[j].fields |= PERF_OUTPUT_IP;
700 output[j].fields |= PERF_OUTPUT_SYM;
701 output[j].fields |= PERF_OUTPUT_SYMOFFSET;
702 output[j].fields |= PERF_OUTPUT_DSO;
703 evsel__set_print_ip_opts(evsel);
704 goto out;
705 }
706 }
707 }
708
709 if (tod && !perf_session__env(session)->clock.enabled) {
710 pr_err("Can't provide 'tod' time, missing clock data. "
711 "Please record with -k/--clockid option.\n");
712 return -1;
713 }
714 out:
715 return 0;
716 }
717
static int perf_sample__fprintf_regs(struct regs_dump *regs, uint64_t mask,
719 uint16_t e_machine, uint32_t e_flags,
720 FILE *fp)
721 {
722 unsigned i = 0, r;
723 int printed = 0;
724
725 if (!regs || !regs->regs)
726 return 0;
727
728 printed += fprintf(fp, " ABI:%" PRIu64 " ", regs->abi);
729
730 for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
731 u64 val = regs->regs[i++];
732 printed += fprintf(fp, "%5s:0x%"PRIx64" ",
733 perf_reg_name(r, e_machine, e_flags),
734 val);
735 }
736
737 return printed;
738 }
739
740 #define DEFAULT_TOD_FMT "%F %H:%M:%S"
741
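/*
 * Convert a perf clock timestamp to wall-clock time ("tod") using the
 * clockid/TOD reference pair recorded in the perf.data header when the
 * session was recorded with -k/--clockid.
 */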
742 static char*
tod_scnprintf(struct perf_script *script, char *buf, int buflen,
744 u64 timestamp)
745 {
746 u64 tod_ns, clockid_ns;
747 struct perf_env *env;
748 unsigned long nsec;
749 struct tm ltime;
750 char date[64];
751 time_t sec;
752
753 buf[0] = '\0';
754 if (buflen < 64 || !script)
755 return buf;
756
757 env = perf_session__env(script->session);
758 if (!env->clock.enabled) {
759 scnprintf(buf, buflen, "disabled");
760 return buf;
761 }
762
763 clockid_ns = env->clock.clockid_ns;
764 tod_ns = env->clock.tod_ns;
765
766 if (timestamp > clockid_ns)
767 tod_ns += timestamp - clockid_ns;
768 else
769 tod_ns -= clockid_ns - timestamp;
770
771 sec = (time_t) (tod_ns / NSEC_PER_SEC);
772 nsec = tod_ns - sec * NSEC_PER_SEC;
773
774 if (localtime_r(&sec, <ime) == NULL) {
775 scnprintf(buf, buflen, "failed");
776 } else {
777 strftime(date, sizeof(date), DEFAULT_TOD_FMT, <ime);
778
779 if (symbol_conf.nanosecs) {
780 snprintf(buf, buflen, "%s.%09lu", date, nsec);
781 } else {
782 snprintf(buf, buflen, "%s.%06lu",
783 date, nsec / NSEC_PER_USEC);
784 }
785 }
786
787 return buf;
788 }
789
static int perf_sample__fprintf_iregs(struct perf_sample *sample,
791 struct perf_event_attr *attr,
792 uint16_t e_machine,
793 uint32_t e_flags,
794 FILE *fp)
795 {
796 if (!sample->intr_regs)
797 return 0;
798
799 return perf_sample__fprintf_regs(perf_sample__intr_regs(sample),
800 attr->sample_regs_intr, e_machine, e_flags, fp);
801 }
802
static int perf_sample__fprintf_uregs(struct perf_sample *sample,
804 struct perf_event_attr *attr,
805 uint16_t e_machine,
806 uint32_t e_flags,
807 FILE *fp)
808 {
809 if (!sample->user_regs)
810 return 0;
811
812 return perf_sample__fprintf_regs(perf_sample__user_regs(sample),
813 attr->sample_regs_user, e_machine, e_flags, fp);
814 }
815
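/*
 * Print the leading per-sample columns selected with -F: comm, pid/tid,
 * cpu, misc flags, time-of-day and timestamp.
 */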
static int perf_sample__fprintf_start(struct perf_script *script,
817 struct perf_sample *sample,
818 struct thread *thread,
819 struct evsel *evsel,
820 u32 type, FILE *fp)
821 {
822 unsigned long secs;
823 unsigned long long nsecs;
824 int printed = 0;
825 char tstr[128];
826
827 /*
828 * Print the branch counter's abbreviation list,
829 * if the branch counter is available.
830 */
831 if (PRINT_FIELD(BRCNTR) && !verbose) {
832 char *buf;
833
834 if (!annotation_br_cntr_abbr_list(&buf, evsel, true)) {
835 printed += fprintf(stdout, "%s", buf);
836 free(buf);
837 }
838 }
839
840 if (PRINT_FIELD(MACHINE_PID) && sample->machine_pid)
841 printed += fprintf(fp, "VM:%5d ", sample->machine_pid);
842
843 /* Print VCPU only for guest events i.e. with machine_pid */
844 if (PRINT_FIELD(VCPU) && sample->machine_pid)
845 printed += fprintf(fp, "VCPU:%03d ", sample->vcpu);
846
847 if (PRINT_FIELD(COMM)) {
848 const char *comm = thread ? thread__comm_str(thread) : ":-1";
849
850 if (latency_format)
851 printed += fprintf(fp, "%8.8s ", comm);
852 else if (PRINT_FIELD(IP) && evsel__has_callchain(evsel) && symbol_conf.use_callchain)
853 printed += fprintf(fp, "%s ", comm);
854 else
855 printed += fprintf(fp, "%16s ", comm);
856 }
857
858 if (PRINT_FIELD(PID) && PRINT_FIELD(TID))
859 printed += fprintf(fp, "%7d/%-7d ", sample->pid, sample->tid);
860 else if (PRINT_FIELD(PID))
861 printed += fprintf(fp, "%7d ", sample->pid);
862 else if (PRINT_FIELD(TID))
863 printed += fprintf(fp, "%7d ", sample->tid);
864
865 if (PRINT_FIELD(CPU)) {
866 if (latency_format)
867 printed += fprintf(fp, "%3d ", sample->cpu);
868 else
869 printed += fprintf(fp, "[%03d] ", sample->cpu);
870 }
871
872 if (PRINT_FIELD(MISC)) {
873 int ret = 0;
874
875 #define has(m) \
876 (sample->misc & PERF_RECORD_MISC_##m) == PERF_RECORD_MISC_##m
877
878 if (has(KERNEL))
879 ret += fprintf(fp, "K");
880 if (has(USER))
881 ret += fprintf(fp, "U");
882 if (has(HYPERVISOR))
883 ret += fprintf(fp, "H");
884 if (has(GUEST_KERNEL))
885 ret += fprintf(fp, "G");
886 if (has(GUEST_USER))
887 ret += fprintf(fp, "g");
888
889 switch (type) {
890 case PERF_RECORD_MMAP:
891 case PERF_RECORD_MMAP2:
892 if (has(MMAP_DATA))
893 ret += fprintf(fp, "M");
894 break;
895 case PERF_RECORD_COMM:
896 if (has(COMM_EXEC))
897 ret += fprintf(fp, "E");
898 break;
899 case PERF_RECORD_SWITCH:
900 case PERF_RECORD_SWITCH_CPU_WIDE:
901 if (has(SWITCH_OUT)) {
902 ret += fprintf(fp, "S");
903 if (sample->misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT)
904 ret += fprintf(fp, "p");
905 }
906 default:
907 break;
908 }
909
910 #undef has
911
912 ret += fprintf(fp, "%*s", 6 - ret, " ");
913 printed += ret;
914 }
915
916 if (PRINT_FIELD(TOD)) {
917 tod_scnprintf(script, tstr, sizeof(tstr), sample->time);
918 printed += fprintf(fp, "%s ", tstr);
919 }
920
921 if (PRINT_FIELD(TIME)) {
922 u64 t = sample->time;
923 if (reltime) {
924 if (!initial_time)
925 initial_time = sample->time;
926 t = sample->time - initial_time;
927 } else if (deltatime) {
928 if (previous_time)
929 t = sample->time - previous_time;
930 else {
931 t = 0;
932 }
933 previous_time = sample->time;
934 }
935 nsecs = t;
936 secs = nsecs / NSEC_PER_SEC;
937 nsecs -= secs * NSEC_PER_SEC;
938
939 if (symbol_conf.nanosecs)
940 printed += fprintf(fp, "%5lu.%09llu: ", secs, nsecs);
941 else {
942 char sample_time[32];
943 timestamp__scnprintf_usec(t, sample_time, sizeof(sample_time));
944 printed += fprintf(fp, "%12s: ", sample_time);
945 }
946 }
947
948 return printed;
949 }
950
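/*
 * Encode the branch event flags: "P" predicted or "M" mispredicted, with
 * a trailing "N" if the branch was not taken; "-" when no flag is set.
 */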
951 static inline size_t
bstack_event_str(struct branch_entry *br, char *buf, size_t sz)
953 {
954 if (!(br->flags.mispred || br->flags.predicted || br->flags.not_taken))
955 return snprintf(buf, sz, "-");
956
957 return snprintf(buf, sz, "%s%s",
958 br->flags.predicted ? "P" : "M",
959 br->flags.not_taken ? "N" : "");
960 }
961
static int print_bstack_flags(FILE *fp, struct branch_entry *br)
963 {
964 char events[16] = { 0 };
965 size_t pos;
966
967 pos = bstack_event_str(br, events, sizeof(events));
968 return fprintf(fp, "/%s/%c/%c/%d/%s/%s ",
969 pos < 0 ? "-" : events,
970 br->flags.in_tx ? 'X' : '-',
971 br->flags.abort ? 'A' : '-',
972 br->flags.cycles,
973 get_branch_type(br),
974 br->flags.spec ? branch_spec_desc(br->flags.spec) : "-");
975 }
976
static int perf_sample__fprintf_brstack(struct perf_sample *sample,
978 struct thread *thread,
979 struct evsel *evsel, FILE *fp)
980 {
981 struct branch_stack *br = sample->branch_stack;
982 struct branch_entry *entries = perf_sample__branch_entries(sample);
983 u64 i, from, to;
984 int printed = 0;
985
986 if (!(br && br->nr))
987 return 0;
988
989 for (i = 0; i < br->nr; i++) {
990 from = entries[i].from;
991 to = entries[i].to;
992
993 printed += fprintf(fp, " 0x%"PRIx64, from);
994 if (PRINT_FIELD(DSO)) {
995 struct addr_location alf, alt;
996
997 addr_location__init(&alf);
998 addr_location__init(&alt);
999 thread__find_map_fb(thread, sample->cpumode, from, &alf);
1000 thread__find_map_fb(thread, sample->cpumode, to, &alt);
1001
1002 printed += map__fprintf_dsoname_dsoff(alf.map, PRINT_FIELD(DSOFF), alf.addr, fp);
1003 printed += fprintf(fp, "/0x%"PRIx64, to);
1004 printed += map__fprintf_dsoname_dsoff(alt.map, PRINT_FIELD(DSOFF), alt.addr, fp);
1005 addr_location__exit(&alt);
1006 addr_location__exit(&alf);
1007 } else
1008 printed += fprintf(fp, "/0x%"PRIx64, to);
1009
1010 printed += print_bstack_flags(fp, entries + i);
1011 }
1012
1013 return printed;
1014 }
1015
static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
1017 struct thread *thread,
1018 struct evsel *evsel, FILE *fp)
1019 {
1020 struct branch_stack *br = sample->branch_stack;
1021 struct branch_entry *entries = perf_sample__branch_entries(sample);
1022 u64 i, from, to;
1023 int printed = 0;
1024
1025 if (!(br && br->nr))
1026 return 0;
1027
1028 for (i = 0; i < br->nr; i++) {
1029 struct addr_location alf, alt;
1030
1031 addr_location__init(&alf);
1032 addr_location__init(&alt);
1033 from = entries[i].from;
1034 to = entries[i].to;
1035
1036 thread__find_symbol_fb(thread, sample->cpumode, from, &alf);
1037 thread__find_symbol_fb(thread, sample->cpumode, to, &alt);
1038
1039 printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
1040 if (PRINT_FIELD(DSO))
1041 printed += map__fprintf_dsoname_dsoff(alf.map, PRINT_FIELD(DSOFF), alf.addr, fp);
1042 printed += fprintf(fp, "%c", '/');
1043 printed += symbol__fprintf_symname_offs(alt.sym, &alt, fp);
1044 if (PRINT_FIELD(DSO))
1045 printed += map__fprintf_dsoname_dsoff(alt.map, PRINT_FIELD(DSOFF), alt.addr, fp);
1046 printed += print_bstack_flags(fp, entries + i);
1047 addr_location__exit(&alt);
1048 addr_location__exit(&alf);
1049 }
1050
1051 return printed;
1052 }
1053
static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
1055 struct thread *thread,
1056 struct evsel *evsel, FILE *fp)
1057 {
1058 struct branch_stack *br = sample->branch_stack;
1059 struct branch_entry *entries = perf_sample__branch_entries(sample);
1060 u64 i, from, to;
1061 int printed = 0;
1062
1063 if (!(br && br->nr))
1064 return 0;
1065
1066 for (i = 0; i < br->nr; i++) {
1067 struct addr_location alf, alt;
1068
1069 addr_location__init(&alf);
1070 addr_location__init(&alt);
1071 from = entries[i].from;
1072 to = entries[i].to;
1073
1074 if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
1075 !dso__adjust_symbols(map__dso(alf.map)))
1076 from = map__dso_map_ip(alf.map, from);
1077
1078 if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
1079 !dso__adjust_symbols(map__dso(alt.map)))
1080 to = map__dso_map_ip(alt.map, to);
1081
1082 printed += fprintf(fp, " 0x%"PRIx64, from);
1083 if (PRINT_FIELD(DSO))
1084 printed += map__fprintf_dsoname_dsoff(alf.map, PRINT_FIELD(DSOFF), alf.addr, fp);
1085 printed += fprintf(fp, "/0x%"PRIx64, to);
1086 if (PRINT_FIELD(DSO))
1087 printed += map__fprintf_dsoname_dsoff(alt.map, PRINT_FIELD(DSOFF), alt.addr, fp);
1088 printed += print_bstack_flags(fp, entries + i);
1089 addr_location__exit(&alt);
1090 addr_location__exit(&alf);
1091 }
1092
1093 return printed;
1094 }
1095 #define MAXBB 16384UL
1096
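/*
 * Read the code bytes for the basic block [start, end] into buffer.
 * Returns the number of bytes read, 0 if the block should be skipped,
 * or -ENXIO if the block crosses the kernel/user boundary so the caller
 * can patch it up.
 */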
static int grab_bb(u8 *buffer, u64 start, u64 end,
1098 struct machine *machine, struct thread *thread,
1099 bool *is64bit, u8 *cpumode, bool last)
1100 {
1101 long offset, len;
1102 struct addr_location al;
1103 bool kernel;
1104 struct dso *dso;
1105 int ret = 0;
1106
1107 if (!start || !end)
1108 return 0;
1109
1110 kernel = machine__kernel_ip(machine, start);
1111 if (kernel)
1112 *cpumode = PERF_RECORD_MISC_KERNEL;
1113 else
1114 *cpumode = PERF_RECORD_MISC_USER;
1115
1116 /*
1117 * Block overlaps between kernel and user.
1118 * This can happen due to ring filtering
1119 * On Intel CPUs the entry into the kernel is filtered,
1120 * but the exit is not. Let the caller patch it up.
1121 */
1122 if (kernel != machine__kernel_ip(machine, end)) {
1123 pr_debug("\tblock %" PRIx64 "-%" PRIx64 " transfers between kernel and user\n", start, end);
1124 return -ENXIO;
1125 }
1126
1127 if (end - start > MAXBB - MAXINSN) {
1128 if (last)
1129 pr_debug("\tbrstack does not reach to final jump (%" PRIx64 "-%" PRIx64 ")\n", start, end);
1130 else
1131 pr_debug("\tblock %" PRIx64 "-%" PRIx64 " (%" PRIu64 ") too long to dump\n", start, end, end - start);
1132 return 0;
1133 }
1134
1135 addr_location__init(&al);
1136 if (!thread__find_map(thread, *cpumode, start, &al) || (dso = map__dso(al.map)) == NULL) {
1137 pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
1138 goto out;
1139 }
1140 if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR) {
1141 pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
1142 goto out;
1143 }
1144
1145 /* Load maps to ensure dso->is_64_bit has been updated */
1146 map__load(al.map);
1147
1148 offset = map__map_ip(al.map, start);
1149 len = dso__data_read_offset(dso, machine, offset, (u8 *)buffer,
1150 end - start + MAXINSN);
1151
1152 *is64bit = dso__is_64_bit(dso);
1153 if (len <= 0)
1154 pr_debug("\tcannot fetch code for block at %" PRIx64 "-%" PRIx64 "\n",
1155 start, end);
1156 ret = len;
1157 out:
1158 addr_location__exit(&al);
1159 return ret;
1160 }
1161
static int map__fprintf_srccode(struct map *map, u64 addr, FILE *fp, struct srccode_state *state)
1163 {
1164 char *srcfile;
1165 int ret = 0;
1166 unsigned line;
1167 int len;
1168 char *srccode;
1169 struct dso *dso;
1170
1171 if (!map || (dso = map__dso(map)) == NULL)
1172 return 0;
1173 srcfile = get_srcline_split(dso,
1174 map__rip_2objdump(map, addr),
1175 &line);
1176 if (!srcfile)
1177 return 0;
1178
1179 /* Avoid redundant printing */
1180 if (state &&
1181 state->srcfile &&
1182 !strcmp(state->srcfile, srcfile) &&
1183 state->line == line) {
1184 free(srcfile);
1185 return 0;
1186 }
1187
1188 srccode = find_sourceline(srcfile, line, &len);
1189 if (!srccode)
1190 goto out_free_line;
1191
1192 ret = fprintf(fp, "|%-8d %.*s", line, len, srccode);
1193
1194 if (state) {
1195 state->srcfile = srcfile;
1196 state->line = line;
1197 }
1198 return ret;
1199
1200 out_free_line:
1201 free(srcfile);
1202 return ret;
1203 }
1204
static int print_srccode(struct thread *thread, u8 cpumode, uint64_t addr)
1206 {
1207 struct addr_location al;
1208 int ret = 0;
1209
1210 addr_location__init(&al);
1211 thread__find_map(thread, cpumode, addr, &al);
1212 if (!al.map)
1213 goto out;
1214 ret = map__fprintf_srccode(al.map, al.addr, stdout,
1215 thread__srccode_state(thread));
1216 if (ret)
1217 ret += printf("\n");
1218 out:
1219 addr_location__exit(&al);
1220 return ret;
1221 }
1222
static int any_dump_insn(struct evsel *evsel __maybe_unused,
1224 struct perf_insn *x, uint64_t ip,
1225 u8 *inbuf, int inlen, int *lenp,
1226 FILE *fp)
1227 {
1228 if (PRINT_FIELD(BRSTACKDISASM)) {
1229 int printed = fprintf_insn_asm(x->machine, x->thread, x->cpumode, x->is64bit,
1230 (uint8_t *)inbuf, inlen, ip, lenp,
1231 PRINT_INSN_IMM_HEX, fp);
1232
1233 if (printed > 0)
1234 return printed;
1235 }
1236 return fprintf(fp, "%s", dump_insn(x, ip, inbuf, inlen, lenp));
1237 }
1238
static int add_padding(FILE *fp, int printed, int padding)
1240 {
1241 if (printed >= 0 && printed < padding)
1242 printed += fprintf(fp, "%*s", padding - printed, "");
1243 return printed;
1244 }
1245
static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
1247 struct perf_insn *x, u8 *inbuf, int len,
1248 int insn, FILE *fp, int *total_cycles,
1249 struct evsel *evsel,
1250 struct thread *thread,
1251 u64 br_cntr)
1252 {
1253 int ilen = 0;
1254 int printed = fprintf(fp, "\t%016" PRIx64 "\t", ip);
1255
1256 printed += add_padding(fp, any_dump_insn(evsel, x, ip, inbuf, len, &ilen, fp), 30);
1257 printed += fprintf(fp, "\t");
1258
1259 if (PRINT_FIELD(BRSTACKINSNLEN))
1260 printed += fprintf(fp, "ilen: %d\t", ilen);
1261
1262 if (PRINT_FIELD(SRCLINE)) {
1263 struct addr_location al;
1264
1265 addr_location__init(&al);
1266 thread__find_map(thread, x->cpumode, ip, &al);
1267 printed += map__fprintf_srcline(al.map, al.addr, " srcline: ", fp);
1268 printed += fprintf(fp, "\t");
1269 addr_location__exit(&al);
1270 }
1271
1272 if (PRINT_FIELD(BRCNTR)) {
1273 struct evsel *pos = evsel__leader(evsel);
1274 unsigned int i = 0, j, num, mask, width;
1275
1276 perf_env__find_br_cntr_info(evsel__env(evsel), NULL, &width);
1277 mask = (1L << width) - 1;
1278 printed += fprintf(fp, "br_cntr: ");
1279 evlist__for_each_entry_from(evsel->evlist, pos) {
1280 if (!(pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS))
1281 continue;
1282 if (evsel__leader(pos) != evsel__leader(evsel))
1283 break;
1284
1285 num = (br_cntr >> (i++ * width)) & mask;
1286 if (!verbose) {
1287 for (j = 0; j < num; j++)
1288 printed += fprintf(fp, "%s", pos->abbr_name);
1289 } else
1290 printed += fprintf(fp, "%s %d ", pos->name, num);
1291 }
1292 printed += fprintf(fp, "\t");
1293 }
1294
1295 printed += fprintf(fp, "#%s%s%s%s",
1296 en->flags.predicted ? " PRED" : "",
1297 en->flags.mispred ? " MISPRED" : "",
1298 en->flags.in_tx ? " INTX" : "",
1299 en->flags.abort ? " ABORT" : "");
1300 if (en->flags.cycles) {
1301 *total_cycles += en->flags.cycles;
1302 printed += fprintf(fp, " %d cycles [%d]", en->flags.cycles, *total_cycles);
1303 if (insn)
1304 printed += fprintf(fp, " %.2f IPC", (float)insn / en->flags.cycles);
1305 }
1306
1307 return printed + fprintf(fp, "\n");
1308 }
1309
static int ip__fprintf_sym(uint64_t addr, struct thread *thread,
1311 u8 cpumode, int cpu, struct symbol **lastsym,
1312 struct evsel *evsel, FILE *fp)
1313 {
1314 struct addr_location al;
1315 int off, printed = 0, ret = 0;
1316
1317 addr_location__init(&al);
1318 thread__find_map(thread, cpumode, addr, &al);
1319
1320 if ((*lastsym) && al.addr >= (*lastsym)->start && al.addr < (*lastsym)->end)
1321 goto out;
1322
1323 al.cpu = cpu;
1324 al.sym = NULL;
1325 if (al.map)
1326 al.sym = map__find_symbol(al.map, al.addr);
1327
1328 if (!al.sym)
1329 goto out;
1330
1331 if (al.addr < al.sym->end)
1332 off = al.addr - al.sym->start;
1333 else
1334 off = al.addr - map__start(al.map) - al.sym->start;
1335 printed += fprintf(fp, "\t%s", al.sym->name);
1336 if (off)
1337 printed += fprintf(fp, "%+d", off);
1338 printed += fprintf(fp, ":");
1339 if (PRINT_FIELD(SRCLINE))
1340 printed += map__fprintf_srcline(al.map, al.addr, "\t", fp);
1341 printed += fprintf(fp, "\n");
1342 *lastsym = al.sym;
1343
1344 ret = printed;
1345 out:
1346 addr_location__exit(&al);
1347 return ret;
1348 }
1349
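/*
 * Print the disassembled basic blocks covered by the LBR branch stack:
 * walk the entries from oldest to newest, dump the instructions between
 * consecutive branches, then the final block from the last branch target
 * up to the sample IP.
 */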
static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
1351 struct evsel *evsel,
1352 struct thread *thread,
1353 struct perf_event_attr *attr,
1354 struct machine *machine, FILE *fp)
1355 {
1356 struct branch_stack *br = sample->branch_stack;
1357 struct branch_entry *entries = perf_sample__branch_entries(sample);
1358 u64 start, end;
1359 int i, insn, len, nr, ilen, printed = 0;
1360 struct perf_insn x;
1361 u8 buffer[MAXBB];
1362 unsigned off;
1363 struct symbol *lastsym = NULL;
1364 int total_cycles = 0;
1365 u64 br_cntr = 0;
1366
1367 if (!(br && br->nr))
1368 return 0;
1369 nr = br->nr;
1370 if (max_blocks && nr > max_blocks + 1)
1371 nr = max_blocks + 1;
1372
1373 x.thread = thread;
1374 x.machine = machine;
1375 x.cpu = sample->cpu;
1376
1377 if (PRINT_FIELD(BRCNTR) && sample->branch_stack_cntr)
1378 br_cntr = sample->branch_stack_cntr[nr - 1];
1379
1380 printed += fprintf(fp, "%c", '\n');
1381
1382 /* Handle first from jump, of which we don't know the entry. */
1383 len = grab_bb(buffer, entries[nr-1].from,
1384 entries[nr-1].from,
1385 machine, thread, &x.is64bit, &x.cpumode, false);
1386 if (len > 0) {
1387 printed += ip__fprintf_sym(entries[nr - 1].from, thread,
1388 x.cpumode, x.cpu, &lastsym, evsel, fp);
1389 printed += ip__fprintf_jump(entries[nr - 1].from, &entries[nr - 1],
1390 &x, buffer, len, 0, fp, &total_cycles,
1391 evsel, thread, br_cntr);
1392 if (PRINT_FIELD(SRCCODE))
1393 printed += print_srccode(thread, x.cpumode, entries[nr - 1].from);
1394 }
1395
1396 /* Print all blocks */
1397 for (i = nr - 2; i >= 0; i--) {
1398 if (entries[i].from || entries[i].to)
1399 pr_debug("%d: %" PRIx64 "-%" PRIx64 "\n", i,
1400 entries[i].from,
1401 entries[i].to);
1402 start = entries[i + 1].to;
1403 end = entries[i].from;
1404
1405 len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, false);
1406 /* Patch up missing kernel transfers due to ring filters */
1407 if (len == -ENXIO && i > 0) {
1408 end = entries[--i].from;
1409 pr_debug("\tpatching up to %" PRIx64 "-%" PRIx64 "\n", start, end);
1410 len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, false);
1411 }
1412 if (len <= 0)
1413 continue;
1414
1415 insn = 0;
1416 for (off = 0; off < (unsigned)len; off += ilen) {
1417 uint64_t ip = start + off;
1418
1419 printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, evsel, fp);
1420 if (ip == end) {
1421 if (PRINT_FIELD(BRCNTR) && sample->branch_stack_cntr)
1422 br_cntr = sample->branch_stack_cntr[i];
1423 printed += ip__fprintf_jump(ip, &entries[i], &x, buffer + off, len - off, ++insn, fp,
1424 &total_cycles, evsel, thread, br_cntr);
1425 if (PRINT_FIELD(SRCCODE))
1426 printed += print_srccode(thread, x.cpumode, ip);
1427 break;
1428 } else {
1429 ilen = 0;
1430 printed += fprintf(fp, "\t%016" PRIx64 "\t", ip);
1431 printed += any_dump_insn(evsel, &x, ip, buffer + off, len - off, &ilen, fp);
1432 if (PRINT_FIELD(BRSTACKINSNLEN))
1433 printed += fprintf(fp, "\tilen: %d", ilen);
1434 printed += fprintf(fp, "\n");
1435 if (ilen == 0)
1436 break;
1437 if (PRINT_FIELD(SRCCODE))
1438 print_srccode(thread, x.cpumode, ip);
1439 insn++;
1440 }
1441 }
1442 if (off != end - start)
1443 printed += fprintf(fp, "\tmismatch of LBR data and executable\n");
1444 }
1445
1446 /*
1447 * Hit the branch? In this case we are already done, and the target
1448 * has not been executed yet.
1449 */
1450 if (entries[0].from == sample->ip)
1451 goto out;
1452 if (entries[0].flags.abort)
1453 goto out;
1454
1455 /*
1456 * Print final block up to sample
1457 *
1458 * Due to pipeline delays the LBRs might be missing a branch
1459 * or two, which can result in very large or negative blocks
1460 * between final branch and sample. When this happens just
1461 * continue walking after the last TO.
1462 */
1463 start = entries[0].to;
1464 end = sample->ip;
1465 if (end < start) {
1466 /* Missing jump. Scan 128 bytes for the next branch */
1467 end = start + 128;
1468 }
1469 len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, true);
1470 printed += ip__fprintf_sym(start, thread, x.cpumode, x.cpu, &lastsym, evsel, fp);
1471 if (len <= 0) {
1472 /* Print at least last IP if basic block did not work */
1473 len = grab_bb(buffer, sample->ip, sample->ip,
1474 machine, thread, &x.is64bit, &x.cpumode, false);
1475 if (len <= 0)
1476 goto out;
1477 ilen = 0;
1478 printed += fprintf(fp, "\t%016" PRIx64 "\t", sample->ip);
1479 printed += any_dump_insn(evsel, &x, sample->ip, buffer, len, &ilen, fp);
1480 if (PRINT_FIELD(BRSTACKINSNLEN))
1481 printed += fprintf(fp, "\tilen: %d", ilen);
1482 printed += fprintf(fp, "\n");
1483 if (PRINT_FIELD(SRCCODE))
1484 print_srccode(thread, x.cpumode, sample->ip);
1485 goto out;
1486 }
1487 for (off = 0; off <= end - start; off += ilen) {
1488 ilen = 0;
1489 printed += fprintf(fp, "\t%016" PRIx64 "\t", start + off);
1490 printed += any_dump_insn(evsel, &x, start + off, buffer + off, len - off, &ilen, fp);
1491 if (PRINT_FIELD(BRSTACKINSNLEN))
1492 printed += fprintf(fp, "\tilen: %d", ilen);
1493 printed += fprintf(fp, "\n");
1494 if (ilen == 0)
1495 break;
1496 if ((attr->branch_sample_type == 0 || attr->branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
1497 && arch_is_uncond_branch(buffer + off, len - off, x.is64bit)
1498 && start + off != sample->ip) {
1499 /*
1500 * Hit a missing branch. Just stop.
1501 */
1502 printed += fprintf(fp, "\t... not reaching sample ...\n");
1503 break;
1504 }
1505 if (PRINT_FIELD(SRCCODE))
1506 print_srccode(thread, x.cpumode, start + off);
1507 }
1508 out:
1509 return printed;
1510 }
1511
static int perf_sample__fprintf_addr(struct perf_sample *sample,
1513 struct thread *thread,
1514 struct evsel *evsel, FILE *fp)
1515 {
1516 struct addr_location al;
1517 int printed = fprintf(fp, "%16" PRIx64, sample->addr);
1518
1519 addr_location__init(&al);
1520 if (!sample_addr_correlates_sym(&evsel->core.attr))
1521 goto out;
1522
1523 thread__resolve(thread, &al, sample);
1524
1525 if (PRINT_FIELD(SYM)) {
1526 printed += fprintf(fp, " ");
1527 if (PRINT_FIELD(SYMOFFSET))
1528 printed += symbol__fprintf_symname_offs(al.sym, &al, fp);
1529 else
1530 printed += symbol__fprintf_symname(al.sym, fp);
1531 }
1532
1533 if (PRINT_FIELD(DSO))
1534 printed += map__fprintf_dsoname_dsoff(al.map, PRINT_FIELD(DSOFF), al.addr, fp);
1535 out:
1536 addr_location__exit(&al);
1537 return printed;
1538 }
1539
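/*
 * For call/return style branch samples, resolve the name of the branch
 * target (calls) or source (returns); fall back to reporting the raw
 * address via *ip when no symbol is found.
 */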
static const char *resolve_branch_sym(struct perf_sample *sample,
1541 struct evsel *evsel,
1542 struct thread *thread,
1543 struct addr_location *al,
1544 struct addr_location *addr_al,
1545 u64 *ip)
1546 {
1547 const char *name = NULL;
1548
1549 if (sample->flags & (PERF_IP_FLAG_CALL | PERF_IP_FLAG_TRACE_BEGIN)) {
1550 if (sample_addr_correlates_sym(&evsel->core.attr)) {
1551 if (!addr_al->thread)
1552 thread__resolve(thread, addr_al, sample);
1553 if (addr_al->sym)
1554 name = addr_al->sym->name;
1555 else
1556 *ip = sample->addr;
1557 } else {
1558 *ip = sample->addr;
1559 }
1560 } else if (sample->flags & (PERF_IP_FLAG_RETURN | PERF_IP_FLAG_TRACE_END)) {
1561 if (al->sym)
1562 name = al->sym->name;
1563 else
1564 *ip = sample->ip;
1565 }
1566 return name;
1567 }
1568
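/*
 * Indent the call/return name by the current thread-stack depth so that
 * nested calls line up, keeping a sticky column width to reduce jitter
 * in the output.
 */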
static int perf_sample__fprintf_callindent(struct perf_sample *sample,
1570 struct evsel *evsel,
1571 struct thread *thread,
1572 struct addr_location *al,
1573 struct addr_location *addr_al,
1574 FILE *fp)
1575 {
1576 size_t depth = thread_stack__depth(thread, sample->cpu);
1577 const char *name = NULL;
1578 static int spacing;
1579 int len = 0;
1580 int dlen = 0;
1581 u64 ip = 0;
1582
1583 /*
1584 * The 'return' has already been popped off the stack so the depth has
1585 * to be adjusted to match the 'call'.
1586 */
1587 if (thread__ts(thread) && sample->flags & PERF_IP_FLAG_RETURN)
1588 depth += 1;
1589
1590 name = resolve_branch_sym(sample, evsel, thread, al, addr_al, &ip);
1591
1592 if (PRINT_FIELD(DSO) && !(PRINT_FIELD(IP) || PRINT_FIELD(ADDR))) {
1593 dlen += fprintf(fp, "(");
1594 dlen += map__fprintf_dsoname(al->map, fp);
1595 dlen += fprintf(fp, ")\t");
1596 }
1597
1598 if (name)
1599 len = fprintf(fp, "%*s%s", (int)depth * 4, "", name);
1600 else if (ip)
1601 len = fprintf(fp, "%*s%16" PRIx64, (int)depth * 4, "", ip);
1602
1603 if (len < 0)
1604 return len;
1605
1606 /*
1607 * Try to keep the output length from changing frequently so that the
1608 * output lines up more nicely.
1609 */
1610 if (len > spacing || (len && len < spacing - 52))
1611 spacing = round_up(len + 4, 32);
1612
1613 if (len < spacing)
1614 len += fprintf(fp, "%*s", spacing - len, "");
1615
1616 return len + dlen;
1617 }
1618
static int perf_sample__fprintf_insn(struct perf_sample *sample,
1620 struct evsel *evsel,
1621 struct perf_event_attr *attr,
1622 struct thread *thread,
1623 struct machine *machine, FILE *fp,
1624 struct addr_location *al)
1625 {
1626 int printed = 0;
1627
1628 perf_sample__fetch_insn(sample, thread, machine);
1629
1630 if (PRINT_FIELD(INSNLEN))
1631 printed += fprintf(fp, " ilen: %d", sample->insn_len);
1632 if (PRINT_FIELD(INSN) && sample->insn_len) {
1633 printed += fprintf(fp, " insn: ");
1634 printed += sample__fprintf_insn_raw(sample, fp);
1635 }
1636 if (PRINT_FIELD(DISASM) && sample->insn_len) {
1637 printed += fprintf(fp, "\t\t");
1638 printed += sample__fprintf_insn_asm(sample, thread, machine, fp, al);
1639 }
1640 if (PRINT_FIELD(BRSTACKINSN) || PRINT_FIELD(BRSTACKINSNLEN) || PRINT_FIELD(BRSTACKDISASM))
1641 printed += perf_sample__fprintf_brstackinsn(sample, evsel, thread, attr, machine, fp);
1642
1643 return printed;
1644 }
1645
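/*
 * Print IPC as instructions/cycles, scaled by 100 to show two decimal
 * places without floating point.
 */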
static int perf_sample__fprintf_ipc(struct perf_sample *sample,
1647 struct evsel *evsel, FILE *fp)
1648 {
1649 unsigned int ipc;
1650
1651 if (!PRINT_FIELD(IPC) || !sample->cyc_cnt || !sample->insn_cnt)
1652 return 0;
1653
1654 ipc = (sample->insn_cnt * 100) / sample->cyc_cnt;
1655
1656 return fprintf(fp, " \t IPC: %u.%02u (%" PRIu64 "/%" PRIu64 ") ",
1657 ipc / 100, ipc % 100, sample->insn_cnt, sample->cyc_cnt);
1658 }
1659
static int perf_sample__fprintf_bts(struct perf_sample *sample,
1661 struct evsel *evsel,
1662 struct thread *thread,
1663 struct addr_location *al,
1664 struct addr_location *addr_al,
1665 struct machine *machine, FILE *fp)
1666 {
1667 struct perf_event_attr *attr = &evsel->core.attr;
1668 unsigned int type = evsel__output_type(evsel);
1669 bool print_srcline_last = false;
1670 int printed = 0;
1671
1672 if (PRINT_FIELD(CALLINDENT))
1673 printed += perf_sample__fprintf_callindent(sample, evsel, thread, al, addr_al, fp);
1674
1675 /* print branch_from information */
1676 if (PRINT_FIELD(IP)) {
1677 unsigned int print_opts = output[type].print_ip_opts;
1678 struct callchain_cursor *cursor = NULL;
1679
1680 if (symbol_conf.use_callchain && sample->callchain) {
1681 cursor = get_tls_callchain_cursor();
1682 if (thread__resolve_callchain(al->thread, cursor, evsel,
1683 sample, NULL, NULL,
1684 scripting_max_stack))
1685 cursor = NULL;
1686 }
1687 if (cursor == NULL) {
1688 printed += fprintf(fp, " ");
1689 if (print_opts & EVSEL__PRINT_SRCLINE) {
1690 print_srcline_last = true;
1691 print_opts &= ~EVSEL__PRINT_SRCLINE;
1692 }
1693 } else
1694 printed += fprintf(fp, "\n");
1695
1696 printed += sample__fprintf_sym(sample, al, 0, print_opts, cursor,
1697 symbol_conf.bt_stop_list, fp);
1698 }
1699
1700 /* print branch_to information */
1701 if (PRINT_FIELD(ADDR) ||
1702 ((evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
1703 !output[type].user_set)) {
1704 printed += fprintf(fp, " => ");
1705 printed += perf_sample__fprintf_addr(sample, thread, evsel, fp);
1706 }
1707
1708 printed += perf_sample__fprintf_ipc(sample, evsel, fp);
1709
1710 if (print_srcline_last)
1711 printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
1712
1713 printed += perf_sample__fprintf_insn(sample, evsel, attr, thread, machine, fp, al);
1714 printed += fprintf(fp, "\n");
1715 if (PRINT_FIELD(SRCCODE)) {
1716 int ret = map__fprintf_srccode(al->map, al->addr, stdout,
1717 thread__srccode_state(thread));
1718 if (ret) {
1719 printed += ret;
1720 printed += printf("\n");
1721 }
1722 }
1723 return printed;
1724 }
1725
static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
1727 {
1728 char str[SAMPLE_FLAGS_BUF_SIZE];
1729 int ret;
1730
1731 ret = perf_sample__sprintf_flags(flags, str, sizeof(str));
1732 if (ret < 0)
1733 return fprintf(fp, " raw flags:0x%-*x ",
1734 SAMPLE_FLAGS_STR_ALIGNED_SIZE - 12, flags);
1735
1736 return fprintf(fp, " %-*s ", SAMPLE_FLAGS_STR_ALIGNED_SIZE, str);
1737 }
1738
1739 struct printer_data {
1740 int line_no;
1741 bool hit_nul;
1742 bool is_printable;
1743 };
1744
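/*
 * binary__fprintf() callback that hex/ASCII dumps PERF_SAMPLE_RAW data
 * from bpf-output events and tracks whether the payload looks like a
 * printable NUL-terminated string.
 */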
static int sample__fprintf_bpf_output(enum binary_printer_ops op,
1746 unsigned int val,
1747 void *extra, FILE *fp)
1748 {
1749 unsigned char ch = (unsigned char)val;
1750 struct printer_data *printer_data = extra;
1751 int printed = 0;
1752
1753 switch (op) {
1754 case BINARY_PRINT_DATA_BEGIN:
1755 printed += fprintf(fp, "\n");
1756 break;
1757 case BINARY_PRINT_LINE_BEGIN:
1758 printed += fprintf(fp, "%17s", !printer_data->line_no ? "BPF output:" :
1759 " ");
1760 break;
1761 case BINARY_PRINT_ADDR:
1762 printed += fprintf(fp, " %04x:", val);
1763 break;
1764 case BINARY_PRINT_NUM_DATA:
1765 printed += fprintf(fp, " %02x", val);
1766 break;
1767 case BINARY_PRINT_NUM_PAD:
1768 printed += fprintf(fp, " ");
1769 break;
1770 case BINARY_PRINT_SEP:
1771 printed += fprintf(fp, " ");
1772 break;
1773 case BINARY_PRINT_CHAR_DATA:
1774 if (printer_data->hit_nul && ch)
1775 printer_data->is_printable = false;
1776
1777 if (!isprint(ch)) {
1778 printed += fprintf(fp, "%c", '.');
1779
1780 if (!printer_data->is_printable)
1781 break;
1782
1783 if (ch == '\0')
1784 printer_data->hit_nul = true;
1785 else
1786 printer_data->is_printable = false;
1787 } else {
1788 printed += fprintf(fp, "%c", ch);
1789 }
1790 break;
1791 case BINARY_PRINT_CHAR_PAD:
1792 printed += fprintf(fp, " ");
1793 break;
1794 case BINARY_PRINT_LINE_END:
1795 printed += fprintf(fp, "\n");
1796 printer_data->line_no++;
1797 break;
1798 case BINARY_PRINT_DATA_END:
1799 default:
1800 break;
1801 }
1802
1803 return printed;
1804 }
1805
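/* Hex dump the raw BPF output; if it is a printable string, print that too. */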
1806 static int perf_sample__fprintf_bpf_output(struct perf_sample *sample, FILE *fp)
1807 {
1808 unsigned int nr_bytes = sample->raw_size;
1809 struct printer_data printer_data = {0, false, true};
1810 int printed = binary__fprintf(sample->raw_data, nr_bytes, 8,
1811 sample__fprintf_bpf_output, &printer_data, fp);
1812
1813 if (printer_data.is_printable && printer_data.hit_nul)
1814 printed += fprintf(fp, "%17s \"%s\"\n", "BPF string:", (char *)(sample->raw_data));
1815
1816 return printed;
1817 }
1818
1819 static int perf_sample__fprintf_spacing(int len, int spacing, FILE *fp)
1820 {
1821 if (len > 0 && len < spacing)
1822 return fprintf(fp, "%*s", spacing - len, "");
1823
1824 return 0;
1825 }
1826
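/* Pad Intel PT synthesized event fields out to a common column width. */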
1827 static int perf_sample__fprintf_pt_spacing(int len, FILE *fp)
1828 {
1829 return perf_sample__fprintf_spacing(len, 34, fp);
1830 }
1831
1832 /* Return true if a value contains only printable ASCII characters padded with NULs */
1833 static bool ptw_is_prt(u64 val)
1834 {
1835 char c;
1836 u32 i;
1837
1838 for (i = 0; i < sizeof(val); i++) {
1839 c = ((char *)&val)[i];
1840 if (!c)
1841 break;
1842 if (!isprint(c) || !isascii(c))
1843 return false;
1844 }
1845 for (; i < sizeof(val); i++) {
1846 c = ((char *)&val)[i];
1847 if (c)
1848 return false;
1849 }
1850 return true;
1851 }
1852
1853 static int perf_sample__fprintf_synth_ptwrite(struct perf_sample *sample, FILE *fp)
1854 {
1855 struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample);
1856 char str[sizeof(u64) + 1] = "";
1857 int len;
1858 u64 val;
1859
1860 if (perf_sample__bad_synth_size(sample, *data))
1861 return 0;
1862
1863 val = le64_to_cpu(data->payload);
1864 if (ptw_is_prt(val)) {
1865 memcpy(str, &val, sizeof(val));
1866 str[sizeof(val)] = 0;
1867 }
1868 len = fprintf(fp, " IP: %u payload: %#" PRIx64 " %s ",
1869 data->ip, val, str);
1870 return len + perf_sample__fprintf_pt_spacing(len, fp);
1871 }
1872
1873 static int perf_sample__fprintf_synth_mwait(struct perf_sample *sample, FILE *fp)
1874 {
1875 struct perf_synth_intel_mwait *data = perf_sample__synth_ptr(sample);
1876 int len;
1877
1878 if (perf_sample__bad_synth_size(sample, *data))
1879 return 0;
1880
1881 len = fprintf(fp, " hints: %#x extensions: %#x ",
1882 data->hints, data->extensions);
1883 return len + perf_sample__fprintf_pt_spacing(len, fp);
1884 }
1885
1886 static int perf_sample__fprintf_synth_pwre(struct perf_sample *sample, FILE *fp)
1887 {
1888 struct perf_synth_intel_pwre *data = perf_sample__synth_ptr(sample);
1889 int len;
1890
1891 if (perf_sample__bad_synth_size(sample, *data))
1892 return 0;
1893
1894 len = fprintf(fp, " hw: %u cstate: %u sub-cstate: %u ",
1895 data->hw, data->cstate, data->subcstate);
1896 return len + perf_sample__fprintf_pt_spacing(len, fp);
1897 }
1898
1899 static int perf_sample__fprintf_synth_exstop(struct perf_sample *sample, FILE *fp)
1900 {
1901 struct perf_synth_intel_exstop *data = perf_sample__synth_ptr(sample);
1902 int len;
1903
1904 if (perf_sample__bad_synth_size(sample, *data))
1905 return 0;
1906
1907 len = fprintf(fp, " IP: %u ", data->ip);
1908 return len + perf_sample__fprintf_pt_spacing(len, fp);
1909 }
1910
1911 static int perf_sample__fprintf_synth_pwrx(struct perf_sample *sample, FILE *fp)
1912 {
1913 struct perf_synth_intel_pwrx *data = perf_sample__synth_ptr(sample);
1914 int len;
1915
1916 if (perf_sample__bad_synth_size(sample, *data))
1917 return 0;
1918
1919 len = fprintf(fp, " deepest cstate: %u last cstate: %u wake reason: %#x ",
1920 data->deepest_cstate, data->last_cstate,
1921 data->wake_reason);
1922 return len + perf_sample__fprintf_pt_spacing(len, fp);
1923 }
1924
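/*
 * Core-to-bus ratio: print the frequency rounded to MHz and, when the
 * maximum non-turbo ratio is known, the corresponding percentage.
 */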
1925 static int perf_sample__fprintf_synth_cbr(struct perf_sample *sample, FILE *fp)
1926 {
1927 struct perf_synth_intel_cbr *data = perf_sample__synth_ptr(sample);
1928 unsigned int percent, freq;
1929 int len;
1930
1931 if (perf_sample__bad_synth_size(sample, *data))
1932 return 0;
1933
1934 freq = (le32_to_cpu(data->freq) + 500) / 1000;
1935 len = fprintf(fp, " cbr: %2u freq: %4u MHz ", data->cbr, freq);
1936 if (data->max_nonturbo) {
1937 percent = (5 + (1000 * data->cbr) / data->max_nonturbo) / 10;
1938 len += fprintf(fp, "(%3u%%) ", percent);
1939 }
1940 return len + perf_sample__fprintf_pt_spacing(len, fp);
1941 }
1942
1943 static int perf_sample__fprintf_synth_psb(struct perf_sample *sample, FILE *fp)
1944 {
1945 struct perf_synth_intel_psb *data = perf_sample__synth_ptr(sample);
1946 int len;
1947
1948 if (perf_sample__bad_synth_size(sample, *data))
1949 return 0;
1950
1951 len = fprintf(fp, " psb offs: %#" PRIx64, data->offset);
1952 return len + perf_sample__fprintf_pt_spacing(len, fp);
1953 }
1954
1955 /* Intel PT Event Trace */
1956 static int perf_sample__fprintf_synth_evt(struct perf_sample *sample, FILE *fp)
1957 {
1958 struct perf_synth_intel_evt *data = perf_sample__synth_ptr(sample);
1959 const char *cfe[32] = {NULL, "INTR", "IRET", "SMI", "RSM", "SIPI",
1960 "INIT", "VMENTRY", "VMEXIT", "VMEXIT_INTR",
1961 "SHUTDOWN", NULL, "UINTR", "UIRET"};
1962 const char *evd[64] = {"PFA", "VMXQ", "VMXR"};
1963 const char *s;
1964 int len, i;
1965
1966 if (perf_sample__bad_synth_size(sample, *data))
1967 return 0;
1968
1969 s = cfe[data->type];
1970 if (s) {
1971 len = fprintf(fp, " cfe: %s IP: %d vector: %u",
1972 s, data->ip, data->vector);
1973 } else {
1974 len = fprintf(fp, " cfe: %u IP: %d vector: %u",
1975 data->type, data->ip, data->vector);
1976 }
1977 for (i = 0; i < data->evd_cnt; i++) {
1978 unsigned int et = data->evd[i].evd_type & 0x3f;
1979
1980 s = evd[et];
1981 if (s) {
1982 len += fprintf(fp, " %s: %#" PRIx64,
1983 s, data->evd[i].payload);
1984 } else {
1985 len += fprintf(fp, " EVD_%u: %#" PRIx64,
1986 et, data->evd[i].payload);
1987 }
1988 }
1989 return len + perf_sample__fprintf_pt_spacing(len, fp);
1990 }
1991
1992 static int perf_sample__fprintf_synth_iflag_chg(struct perf_sample *sample, FILE *fp)
1993 {
1994 struct perf_synth_intel_iflag_chg *data = perf_sample__synth_ptr(sample);
1995 int len;
1996
1997 if (perf_sample__bad_synth_size(sample, *data))
1998 return 0;
1999
2000 len = fprintf(fp, " IFLAG: %d->%d %s branch", !data->iflag, data->iflag,
2001 data->via_branch ? "via" : "non");
2002 return len + perf_sample__fprintf_pt_spacing(len, fp);
2003 }
2004
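/* PowerPC VPA Dispatch Trace Log entry, with big-endian fields. */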
2005 static int perf_sample__fprintf_synth_vpadtl(struct perf_sample *data, FILE *fp)
2006 {
2007 struct powerpc_vpadtl_entry *dtl = (struct powerpc_vpadtl_entry *)data->raw_data;
2008 int len;
2009
2010 len = fprintf(fp, "timebase: %" PRIu64 " dispatch_reason:%s, preempt_reason:%s,\n"
2011 "enqueue_to_dispatch_time:%d, ready_to_enqueue_time:%d,"
2012 "waiting_to_ready_time:%d, processor_id: %d",
2013 get_unaligned_be64(&dtl->timebase),
2014 dispatch_reasons[dtl->dispatch_reason],
2015 preempt_reasons[dtl->preempt_reason],
2016 be32_to_cpu(dtl->enqueue_to_dispatch_time),
2017 be32_to_cpu(dtl->ready_to_enqueue_time),
2018 be32_to_cpu(dtl->waiting_to_ready_time),
2019 be16_to_cpu(dtl->processor_id));
2020
2021 return len;
2022 }
2023
2024 static int perf_sample__fprintf_synth(struct perf_sample *sample,
2025 struct evsel *evsel, FILE *fp)
2026 {
2027 switch (evsel->core.attr.config) {
2028 case PERF_SYNTH_INTEL_PTWRITE:
2029 return perf_sample__fprintf_synth_ptwrite(sample, fp);
2030 case PERF_SYNTH_INTEL_MWAIT:
2031 return perf_sample__fprintf_synth_mwait(sample, fp);
2032 case PERF_SYNTH_INTEL_PWRE:
2033 return perf_sample__fprintf_synth_pwre(sample, fp);
2034 case PERF_SYNTH_INTEL_EXSTOP:
2035 return perf_sample__fprintf_synth_exstop(sample, fp);
2036 case PERF_SYNTH_INTEL_PWRX:
2037 return perf_sample__fprintf_synth_pwrx(sample, fp);
2038 case PERF_SYNTH_INTEL_CBR:
2039 return perf_sample__fprintf_synth_cbr(sample, fp);
2040 case PERF_SYNTH_INTEL_PSB:
2041 return perf_sample__fprintf_synth_psb(sample, fp);
2042 case PERF_SYNTH_INTEL_EVT:
2043 return perf_sample__fprintf_synth_evt(sample, fp);
2044 case PERF_SYNTH_INTEL_IFLAG_CHG:
2045 return perf_sample__fprintf_synth_iflag_chg(sample, fp);
2046 case PERF_SYNTH_POWERPC_VPA_DTL:
2047 return perf_sample__fprintf_synth_vpadtl(sample, fp);
2048 default:
2049 break;
2050 }
2051
2052 return 0;
2053 }
2054
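/* Longest event name in the evlist, used to align the event name column. */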
2055 static int evlist__max_name_len(struct evlist *evlist)
2056 {
2057 struct evsel *evsel;
2058 int max = 0;
2059
2060 evlist__for_each_entry(evlist, evsel) {
2061 int len = strlen(evsel__name(evsel));
2062
2063 max = MAX(len, max);
2064 }
2065
2066 return max;
2067 }
2068
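/*
 * Decode a PERF_SAMPLE_DATA_SRC value into human readable form and print
 * it left aligned to the widest width seen so far.
 */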
2069 static int data_src__fprintf(u64 data_src, FILE *fp)
2070 {
2071 struct mem_info *mi = mem_info__new();
2072 char decode[100];
2073 char out[100];
2074 static int maxlen;
2075 int len;
2076
2077 if (!mi)
2078 return -ENOMEM;
2079
2080 mem_info__data_src(mi)->val = data_src;
2081 perf_script__meminfo_scnprintf(decode, 100, mi);
2082 mem_info__put(mi);
2083
2084 len = scnprintf(out, 100, "%16" PRIx64 " %s", data_src, decode);
2085 if (maxlen < len)
2086 maxlen = len;
2087
2088 return fprintf(fp, "%-*s", maxlen, out);
2089 }
2090
2091 struct metric_ctx {
2092 struct perf_sample *sample;
2093 struct thread *thread;
2094 struct evsel *evsel;
2095 FILE *fp;
2096 };
2097
2098 static void script_print_metric(struct perf_stat_config *config __maybe_unused,
2099 void *ctx, enum metric_threshold_classify thresh,
2100 const char *fmt, const char *unit, double val)
2101 {
2102 struct metric_ctx *mctx = ctx;
2103 const char *color = metric_threshold_classify__color(thresh);
2104
2105 if (!fmt)
2106 return;
2107 perf_sample__fprintf_start(NULL, mctx->sample, mctx->thread, mctx->evsel,
2108 PERF_RECORD_SAMPLE, mctx->fp);
2109 fputs("\tmetric: ", mctx->fp);
2110 if (color)
2111 color_fprintf(mctx->fp, color, fmt, val);
2112 else
2113 printf(fmt, val);
2114 fprintf(mctx->fp, " %s\n", unit);
2115 }
2116
2117 static void script_new_line(struct perf_stat_config *config __maybe_unused,
2118 void *ctx)
2119 {
2120 struct metric_ctx *mctx = ctx;
2121
2122 perf_sample__fprintf_start(NULL, mctx->sample, mctx->thread, mctx->evsel,
2123 PERF_RECORD_SAMPLE, mctx->fp);
2124 fputs("\tmetric: ", mctx->fp);
2125 }
2126
2127 struct script_find_metrics_args {
2128 struct evlist *evlist;
2129 bool system_wide;
2130 };
2131
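/*
 * Find the evsel in the script (perf.data) evlist that corresponds to a
 * metric evsel, matching on attr type/config and, when present, metric_id.
 */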
2132 static struct evsel *map_metric_evsel_to_script_evsel(struct evlist *script_evlist,
2133 struct evsel *metric_evsel)
2134 {
2135 struct evsel *script_evsel;
2136
2137 evlist__for_each_entry(script_evlist, script_evsel) {
2138 		/* Skip if the perf_event_attr type or config differs. */
2139 if (metric_evsel->core.attr.type != script_evsel->core.attr.type)
2140 continue;
2141 if (metric_evsel->core.attr.config != script_evsel->core.attr.config)
2142 continue;
2143 /* Skip if the script event has a metric_id that doesn't match. */
2144 if (script_evsel->metric_id &&
2145 strcmp(evsel__metric_id(metric_evsel), evsel__metric_id(script_evsel))) {
2146 pr_debug("Skipping matching evsel due to differing metric ids '%s' vs '%s'\n",
2147 evsel__metric_id(metric_evsel), evsel__metric_id(script_evsel));
2148 continue;
2149 }
2150 return script_evsel;
2151 }
2152 return NULL;
2153 }
2154
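/*
 * metricgroup__for_each_metric() callback: parse the metric into a
 * temporary evlist and, if all of its evsels are present in the script
 * evlist, copy the metric ids, leaders and expressions over so the metric
 * can be computed from the recorded samples.
 */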
2155 static int script_find_metrics(const struct pmu_metric *pm,
2156 const struct pmu_metrics_table *table __maybe_unused,
2157 void *data)
2158 {
2159 struct script_find_metrics_args *args = data;
2160 struct evlist *script_evlist = args->evlist;
2161 struct evlist *metric_evlist = evlist__new();
2162 struct evsel *metric_evsel;
2163 int ret = metricgroup__parse_groups(metric_evlist,
2164 /*pmu=*/"all",
2165 pm->metric_name,
2166 /*metric_no_group=*/false,
2167 /*metric_no_merge=*/false,
2168 /*metric_no_threshold=*/true,
2169 /*user_requested_cpu_list=*/NULL,
2170 args->system_wide,
2171 /*hardware_aware_grouping=*/false);
2172
2173 if (ret) {
2174 /* Metric parsing failed but continue the search. */
2175 goto out;
2176 }
2177
2178 /*
2179 	 * Check that the script_evlist has an entry for each metric_evlist
2180 	 * entry. If the script evsel was already set up, avoid changing data
2181 	 * that may break it.
2182 */
2183 evlist__for_each_entry(metric_evlist, metric_evsel) {
2184 struct evsel *script_evsel =
2185 map_metric_evsel_to_script_evsel(script_evlist, metric_evsel);
2186 struct evsel *new_metric_leader;
2187
2188 if (!script_evsel) {
2189 pr_debug("Skipping metric '%s' as evsel '%s' / '%s' is missing\n",
2190 pm->metric_name, evsel__name(metric_evsel),
2191 evsel__metric_id(metric_evsel));
2192 goto out;
2193 }
2194
2195 if (script_evsel->metric_leader == NULL)
2196 continue;
2197
2198 if (metric_evsel->metric_leader == metric_evsel) {
2199 new_metric_leader = script_evsel;
2200 } else {
2201 new_metric_leader =
2202 map_metric_evsel_to_script_evsel(script_evlist,
2203 metric_evsel->metric_leader);
2204 }
2205 /* Mismatching evsel leaders. */
2206 if (script_evsel->metric_leader != new_metric_leader) {
2207 pr_debug("Skipping metric '%s' due to mismatching evsel metric leaders '%s' vs '%s'\n",
2208 pm->metric_name, evsel__metric_id(metric_evsel),
2209 evsel__metric_id(script_evsel));
2210 goto out;
2211 }
2212 }
2213 /*
2214 * Metric events match those in the script evlist, copy metric evsel
2215 * data into the script evlist.
2216 */
2217 evlist__for_each_entry(metric_evlist, metric_evsel) {
2218 struct evsel *script_evsel =
2219 map_metric_evsel_to_script_evsel(script_evlist, metric_evsel);
2220 struct metric_event *metric_me = metricgroup__lookup(&metric_evlist->metric_events,
2221 metric_evsel,
2222 /*create=*/false);
2223
2224 if (script_evsel->metric_id == NULL) {
2225 script_evsel->metric_id = metric_evsel->metric_id;
2226 metric_evsel->metric_id = NULL;
2227 }
2228
2229 if (script_evsel->metric_leader == NULL) {
2230 if (metric_evsel->metric_leader == metric_evsel) {
2231 script_evsel->metric_leader = script_evsel;
2232 } else {
2233 script_evsel->metric_leader =
2234 map_metric_evsel_to_script_evsel(script_evlist,
2235 metric_evsel->metric_leader);
2236 }
2237 }
2238
2239 if (metric_me) {
2240 struct metric_expr *expr;
2241 struct metric_event *script_me =
2242 metricgroup__lookup(&script_evlist->metric_events,
2243 script_evsel,
2244 /*create=*/true);
2245
2246 if (!script_me) {
2247 /*
2248 				 * As the lookup was done with create=true,
2249 				 * the only possible failure is lack of memory.
2250 */
2251 goto out;
2252 }
2253 list_splice_init(&metric_me->head, &script_me->head);
2254 list_for_each_entry(expr, &script_me->head, nd) {
2255 for (int i = 0; expr->metric_events[i]; i++) {
2256 expr->metric_events[i] =
2257 map_metric_evsel_to_script_evsel(script_evlist,
2258 expr->metric_events[i]);
2259 }
2260 }
2261 }
2262 }
2263 	pr_debug("Found metric '%s' whose evsels match those in the perf data\n",
2264 pm->metric_name);
2265 evlist__delete(metric_evlist);
2266 out:
2267 return 0;
2268 }
2269
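/* Map any CPU to the global aggregation id; only global aggregation is supported. */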
2270 static struct aggr_cpu_id script_aggr_cpu_id_get(struct perf_stat_config *config __maybe_unused,
2271 struct perf_cpu cpu)
2272 {
2273 return aggr_cpu_id__global(cpu, /*data=*/NULL);
2274 }
2275
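/*
 * Fold the sample's period into the evsel's counts and print whatever
 * shadow metrics can be derived from the events in the evlist.
 */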
2276 static void perf_sample__fprint_metric(struct thread *thread,
2277 struct evsel *evsel,
2278 struct perf_sample *sample,
2279 FILE *fp)
2280 {
2281 static bool init_metrics;
2282 struct perf_stat_output_ctx ctx = {
2283 .print_metric = script_print_metric,
2284 .new_line = script_new_line,
2285 .ctx = &(struct metric_ctx) {
2286 .sample = sample,
2287 .thread = thread,
2288 .evsel = evsel,
2289 .fp = fp,
2290 },
2291 .force_header = false,
2292 };
2293 struct perf_counts_values *count, *old_count;
2294 int cpu_map_idx, thread_map_idx, aggr_idx;
2295 struct evsel *pos;
2296
2297 if (!init_metrics) {
2298 /* One time initialization of stat_config and metric data. */
2299 struct script_find_metrics_args args = {
2300 .evlist = evsel->evlist,
2301 .system_wide = perf_thread_map__pid(evsel->core.threads, /*idx=*/0) == -1,
2302
2303 };
2304 if (!stat_config.output)
2305 stat_config.output = stdout;
2306
2307 if (!stat_config.aggr_map) {
2308 /* TODO: currently only global aggregation is supported. */
2309 assert(stat_config.aggr_mode == AGGR_GLOBAL);
2310 stat_config.aggr_get_id = script_aggr_cpu_id_get;
2311 stat_config.aggr_map =
2312 cpu_aggr_map__new(evsel->evlist->core.user_requested_cpus,
2313 aggr_cpu_id__global, /*data=*/NULL,
2314 /*needs_sort=*/false);
2315 }
2316
2317 metricgroup__for_each_metric(pmu_metrics_table__find(), script_find_metrics, &args);
2318 init_metrics = true;
2319 }
2320
2321 if (!evsel->stats) {
2322 if (evlist__alloc_stats(&stat_config, evsel->evlist, /*alloc_raw=*/true) < 0)
2323 return;
2324 }
2325 if (!evsel->stats->aggr) {
2326 if (evlist__alloc_aggr_stats(evsel->evlist, stat_config.aggr_map->nr) < 0)
2327 return;
2328 }
2329
2330 /* Update the evsel's count using the sample's data. */
2331 cpu_map_idx = perf_cpu_map__idx(evsel->core.cpus, (struct perf_cpu){sample->cpu});
2332 if (cpu_map_idx < 0) {
2333 /* Missing CPU, check for any CPU. */
2334 if (perf_cpu_map__cpu(evsel->core.cpus, /*idx=*/0).cpu == -1 ||
2335 sample->cpu == (u32)-1) {
2336 			/* Place the counts in whichever CPU is first in the map. */
2337 cpu_map_idx = 0;
2338 } else {
2339 pr_info("Missing CPU map entry for CPU %d\n", sample->cpu);
2340 return;
2341 }
2342 }
2343 thread_map_idx = perf_thread_map__idx(evsel->core.threads, sample->tid);
2344 if (thread_map_idx < 0) {
2345 /* Missing thread, check for any thread. */
2346 if (perf_thread_map__pid(evsel->core.threads, /*idx=*/0) == -1 ||
2347 sample->tid == (u32)-1) {
2348 			/* Place the counts in whichever thread is first in the map. */
2349 thread_map_idx = 0;
2350 } else {
2351 pr_info("Missing thread map entry for thread %d\n", sample->tid);
2352 return;
2353 }
2354 }
2355 count = perf_counts(evsel->counts, cpu_map_idx, thread_map_idx);
2356 old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread_map_idx);
2357 count->val = old_count->val + sample->period;
2358 count->run = old_count->run + 1;
2359 count->ena = old_count->ena + 1;
2360
2361 /* Update the aggregated stats. */
2362 perf_stat_process_counter(&stat_config, evsel);
2363
2364 /* Display all metrics. */
2365 evlist__for_each_entry(evsel->evlist, pos) {
2366 cpu_aggr_map__for_each_idx(aggr_idx, stat_config.aggr_map) {
2367 perf_stat__print_shadow_stats(&stat_config, pos,
2368 aggr_idx,
2369 &ctx);
2370 }
2371 }
2372 }
2373
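/*
 * Implement --graph-function filtering: once one of the named functions is
 * hit, keep showing events until the thread-stack depth drops back to the
 * depth at which it was entered.
 */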
2374 static bool show_event(struct perf_sample *sample,
2375 struct evsel *evsel,
2376 struct thread *thread,
2377 struct addr_location *al,
2378 struct addr_location *addr_al)
2379 {
2380 int depth = thread_stack__depth(thread, sample->cpu);
2381
2382 if (!symbol_conf.graph_function)
2383 return true;
2384
2385 if (thread__filter(thread)) {
2386 if (depth <= thread__filter_entry_depth(thread)) {
2387 thread__set_filter(thread, false);
2388 return false;
2389 }
2390 return true;
2391 } else {
2392 const char *s = symbol_conf.graph_function;
2393 u64 ip;
2394 const char *name = resolve_branch_sym(sample, evsel, thread, al, addr_al,
2395 &ip);
2396 unsigned nlen;
2397
2398 if (!name)
2399 return false;
2400 nlen = strlen(name);
2401 while (*s) {
2402 unsigned len = strcspn(s, ",");
2403 if (nlen == len && !strncmp(name, s, len)) {
2404 thread__set_filter(thread, true);
2405 thread__set_filter_entry_depth(thread, depth);
2406 return true;
2407 }
2408 s += len;
2409 if (*s == ',')
2410 s++;
2411 }
2412 return false;
2413 }
2414 }
2415
2416 static void process_event(struct perf_script *script,
2417 struct perf_sample *sample, struct evsel *evsel,
2418 struct addr_location *al,
2419 struct addr_location *addr_al,
2420 struct machine *machine)
2421 {
2422 struct thread *thread = al->thread;
2423 struct perf_event_attr *attr = &evsel->core.attr;
2424 unsigned int type = evsel__output_type(evsel);
2425 struct evsel_script *es = evsel->priv;
2426 FILE *fp = es->fp;
2427 char str[PAGE_SIZE_NAME_LEN];
2428 uint32_t e_flags;
2429
2430 if (output[type].fields == 0)
2431 return;
2432
2433 ++es->samples;
2434
2435 perf_sample__fprintf_start(script, sample, thread, evsel,
2436 PERF_RECORD_SAMPLE, fp);
2437
2438 if (PRINT_FIELD(PERIOD))
2439 fprintf(fp, "%10" PRIu64 " ", sample->period);
2440
2441 if (PRINT_FIELD(EVNAME)) {
2442 const char *evname = evsel__name(evsel);
2443
2444 if (!script->name_width)
2445 script->name_width = evlist__max_name_len(script->session->evlist);
2446
2447 fprintf(fp, "%*s: ", script->name_width, evname ?: "[unknown]");
2448 }
2449
2450 if (print_flags)
2451 perf_sample__fprintf_flags(sample->flags, fp);
2452
2453 if (is_bts_event(attr)) {
2454 perf_sample__fprintf_bts(sample, evsel, thread, al, addr_al, machine, fp);
2455 return;
2456 }
2457 #ifdef HAVE_LIBTRACEEVENT
2458 if (PRINT_FIELD(TRACE) && sample->raw_data) {
2459 const struct tep_event *tp_format = evsel__tp_format(evsel);
2460
2461 if (tp_format) {
2462 event_format__fprintf(tp_format, sample->cpu,
2463 sample->raw_data, sample->raw_size,
2464 fp);
2465 }
2466 }
2467 #endif
2468 if (attr->type == PERF_TYPE_SYNTH && PRINT_FIELD(SYNTH))
2469 perf_sample__fprintf_synth(sample, evsel, fp);
2470
2471 if (PRINT_FIELD(ADDR))
2472 perf_sample__fprintf_addr(sample, thread, evsel, fp);
2473
2474 if (PRINT_FIELD(DATA_SRC))
2475 data_src__fprintf(sample->data_src, fp);
2476
2477 if (PRINT_FIELD(WEIGHT))
2478 fprintf(fp, "%16" PRIu64, sample->weight);
2479
2480 if (PRINT_FIELD(INS_LAT))
2481 fprintf(fp, "%16" PRIu16, sample->ins_lat);
2482
2483 if (PRINT_FIELD(RETIRE_LAT))
2484 fprintf(fp, "%16" PRIu16, sample->weight3);
2485
2486 if (PRINT_FIELD(CGROUP)) {
2487 const char *cgrp_name;
2488 struct cgroup *cgrp = cgroup__find(machine->env,
2489 sample->cgroup);
2490 if (cgrp != NULL)
2491 cgrp_name = cgrp->name;
2492 else
2493 cgrp_name = "unknown";
2494 fprintf(fp, " %s", cgrp_name);
2495 }
2496
2497 if (PRINT_FIELD(IP)) {
2498 struct callchain_cursor *cursor = NULL;
2499
2500 if (script->stitch_lbr)
2501 thread__set_lbr_stitch_enable(al->thread, true);
2502
2503 if (symbol_conf.use_callchain && sample->callchain) {
2504 cursor = get_tls_callchain_cursor();
2505 if (thread__resolve_callchain(al->thread, cursor, evsel,
2506 sample, NULL, NULL,
2507 scripting_max_stack))
2508 cursor = NULL;
2509 }
2510 fputc(cursor ? '\n' : ' ', fp);
2511 sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor,
2512 symbol_conf.bt_stop_list, fp);
2513 }
2514
2515 if (PRINT_FIELD(IREGS)) {
2516 perf_sample__fprintf_iregs(sample, attr,
2517 thread__e_machine(thread, machine, &e_flags),
2518 e_flags,
2519 fp);
2520 }
2521
2522 if (PRINT_FIELD(UREGS)) {
2523 perf_sample__fprintf_uregs(sample, attr,
2524 thread__e_machine(thread, machine, &e_flags),
2525 e_flags,
2526 fp);
2527 }
2528
2529 if (PRINT_FIELD(BRSTACK))
2530 perf_sample__fprintf_brstack(sample, thread, evsel, fp);
2531 else if (PRINT_FIELD(BRSTACKSYM))
2532 perf_sample__fprintf_brstacksym(sample, thread, evsel, fp);
2533 else if (PRINT_FIELD(BRSTACKOFF))
2534 perf_sample__fprintf_brstackoff(sample, thread, evsel, fp);
2535
2536 if (evsel__is_bpf_output(evsel) && !evsel__is_offcpu_event(evsel) && PRINT_FIELD(BPF_OUTPUT))
2537 perf_sample__fprintf_bpf_output(sample, fp);
2538 perf_sample__fprintf_insn(sample, evsel, attr, thread, machine, fp, al);
2539
2540 if (PRINT_FIELD(PHYS_ADDR))
2541 fprintf(fp, "%16" PRIx64, sample->phys_addr);
2542
2543 if (PRINT_FIELD(DATA_PAGE_SIZE))
2544 fprintf(fp, " %s", get_page_size_name(sample->data_page_size, str));
2545
2546 if (PRINT_FIELD(CODE_PAGE_SIZE))
2547 fprintf(fp, " %s", get_page_size_name(sample->code_page_size, str));
2548
2549 perf_sample__fprintf_ipc(sample, evsel, fp);
2550
2551 fprintf(fp, "\n");
2552
2553 if (PRINT_FIELD(SRCCODE)) {
2554 if (map__fprintf_srccode(al->map, al->addr, stdout,
2555 thread__srccode_state(thread)))
2556 printf("\n");
2557 }
2558
2559 if (PRINT_FIELD(METRIC))
2560 perf_sample__fprint_metric(thread, evsel, sample, fp);
2561
2562 if (verbose > 0)
2563 fflush(fp);
2564 }
2565
2566 static struct scripting_ops *scripting_ops;
2567
2568 static void __process_stat(struct evsel *counter, u64 tstamp)
2569 {
2570 int nthreads = perf_thread_map__nr(counter->core.threads);
2571 int idx, thread;
2572 struct perf_cpu cpu;
2573 static int header_printed;
2574
2575 if (!header_printed) {
2576 printf("%3s %8s %15s %15s %15s %15s %s\n",
2577 "CPU", "THREAD", "VAL", "ENA", "RUN", "TIME", "EVENT");
2578 header_printed = 1;
2579 }
2580
2581 for (thread = 0; thread < nthreads; thread++) {
2582 perf_cpu_map__for_each_cpu(cpu, idx, evsel__cpus(counter)) {
2583 struct perf_counts_values *counts;
2584
2585 counts = perf_counts(counter->counts, idx, thread);
2586
2587 printf("%3d %8d %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %s\n",
2588 cpu.cpu,
2589 perf_thread_map__pid(counter->core.threads, thread),
2590 counts->val,
2591 counts->ena,
2592 counts->run,
2593 tstamp,
2594 evsel__name(counter));
2595 }
2596 }
2597 }
2598
2599 static void process_stat(struct evsel *counter, u64 tstamp)
2600 {
2601 if (scripting_ops && scripting_ops->process_stat)
2602 scripting_ops->process_stat(&stat_config, counter, tstamp);
2603 else
2604 __process_stat(counter, tstamp);
2605 }
2606
2607 static void process_stat_interval(u64 tstamp)
2608 {
2609 if (scripting_ops && scripting_ops->process_stat_interval)
2610 scripting_ops->process_stat_interval(tstamp);
2611 }
2612
2613 static void setup_scripting(void)
2614 {
2615 #ifdef HAVE_LIBTRACEEVENT
2616 setup_perl_scripting();
2617 #endif
2618 setup_python_scripting();
2619 }
2620
2621 static int flush_scripting(void)
2622 {
2623 return scripting_ops ? scripting_ops->flush_script() : 0;
2624 }
2625
2626 static int cleanup_scripting(void)
2627 {
2628 pr_debug("\nperf script stopped\n");
2629
2630 return scripting_ops ? scripting_ops->stop_script() : 0;
2631 }
2632
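/* Return true if the sample should be dropped because its CPU is not in cpu_list. */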
2633 static bool filter_cpu(struct perf_sample *sample)
2634 {
2635 if (cpu_list && sample->cpu != (u32)-1)
2636 return !test_bit(sample->cpu, cpu_bitmap);
2637 return false;
2638 }
2639
2640 static int process_sample_event(const struct perf_tool *tool,
2641 union perf_event *event,
2642 struct perf_sample *sample,
2643 struct evsel *evsel,
2644 struct machine *machine)
2645 {
2646 struct perf_script *scr = container_of(tool, struct perf_script, tool);
2647 struct addr_location al;
2648 struct addr_location addr_al;
2649 int ret = 0;
2650
2651 /* Set thread to NULL to indicate addr_al and al are not initialized */
2652 addr_location__init(&al);
2653 addr_location__init(&addr_al);
2654
2655 ret = dlfilter__filter_event_early(dlfilter, event, sample, evsel, machine, &al, &addr_al);
2656 if (ret) {
2657 if (ret > 0)
2658 ret = 0;
2659 goto out_put;
2660 }
2661
2662 if (perf_time__ranges_skip_sample(scr->ptime_range, scr->range_num,
2663 sample->time)) {
2664 goto out_put;
2665 }
2666
2667 if (debug_mode) {
2668 if (sample->time < last_timestamp) {
2669 pr_err("Samples misordered, previous: %" PRIu64
2670 " this: %" PRIu64 "\n", last_timestamp,
2671 sample->time);
2672 nr_unordered++;
2673 }
2674 last_timestamp = sample->time;
2675 goto out_put;
2676 }
2677
2678 if (filter_cpu(sample))
2679 goto out_put;
2680
2681 if (!al.thread && machine__resolve(machine, &al, sample) < 0) {
2682 pr_err("problem processing %d event, skipping it.\n",
2683 event->header.type);
2684 ret = -1;
2685 goto out_put;
2686 }
2687
2688 if (al.filtered)
2689 goto out_put;
2690
2691 if (!show_event(sample, evsel, al.thread, &al, &addr_al))
2692 goto out_put;
2693
2694 if (evswitch__discard(&scr->evswitch, evsel))
2695 goto out_put;
2696
2697 ret = dlfilter__filter_event(dlfilter, event, sample, evsel, machine, &al, &addr_al);
2698 if (ret) {
2699 if (ret > 0)
2700 ret = 0;
2701 goto out_put;
2702 }
2703
2704 if (scripting_ops) {
2705 struct addr_location *addr_al_ptr = NULL;
2706
2707 if ((evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
2708 sample_addr_correlates_sym(&evsel->core.attr)) {
2709 if (!addr_al.thread)
2710 thread__resolve(al.thread, &addr_al, sample);
2711 addr_al_ptr = &addr_al;
2712 }
2713 scripting_ops->process_event(event, sample, evsel, &al, addr_al_ptr);
2714 } else {
2715 process_event(scr, sample, evsel, &al, &addr_al, machine);
2716 }
2717
2718 out_put:
2719 addr_location__exit(&addr_al);
2720 addr_location__exit(&al);
2721 return ret;
2722 }
2723
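/*
 * Print a deferred callchain event: the usual sample prefix, the deferral
 * cookie and, if requested, the resolved callchain.
 */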
2724 static int process_deferred_sample_event(const struct perf_tool *tool,
2725 union perf_event *event,
2726 struct perf_sample *sample,
2727 struct evsel *evsel,
2728 struct machine *machine)
2729 {
2730 struct perf_script *scr = container_of(tool, struct perf_script, tool);
2731 struct perf_event_attr *attr = &evsel->core.attr;
2732 struct evsel_script *es = evsel->priv;
2733 unsigned int type = output_type(attr->type);
2734 struct addr_location al;
2735 FILE *fp = es->fp;
2736 int ret = 0;
2737
2738 if (output[type].fields == 0)
2739 return 0;
2740
2741 	/* Set thread to NULL to indicate al is not initialized */
2742 addr_location__init(&al);
2743
2744 if (perf_time__ranges_skip_sample(scr->ptime_range, scr->range_num,
2745 sample->time)) {
2746 goto out_put;
2747 }
2748
2749 if (debug_mode) {
2750 if (sample->time < last_timestamp) {
2751 pr_err("Samples misordered, previous: %" PRIu64
2752 " this: %" PRIu64 "\n", last_timestamp,
2753 sample->time);
2754 nr_unordered++;
2755 }
2756 last_timestamp = sample->time;
2757 goto out_put;
2758 }
2759
2760 if (filter_cpu(sample))
2761 goto out_put;
2762
2763 if (machine__resolve(machine, &al, sample) < 0) {
2764 pr_err("problem processing %d event, skipping it.\n",
2765 event->header.type);
2766 ret = -1;
2767 goto out_put;
2768 }
2769
2770 if (al.filtered)
2771 goto out_put;
2772
2773 if (!show_event(sample, evsel, al.thread, &al, NULL))
2774 goto out_put;
2775
2776 if (evswitch__discard(&scr->evswitch, evsel))
2777 goto out_put;
2778
2779 perf_sample__fprintf_start(scr, sample, al.thread, evsel,
2780 PERF_RECORD_CALLCHAIN_DEFERRED, fp);
2781 fprintf(fp, "DEFERRED CALLCHAIN [cookie: %llx]",
2782 (unsigned long long)event->callchain_deferred.cookie);
2783
2784 if (PRINT_FIELD(IP)) {
2785 struct callchain_cursor *cursor = NULL;
2786
2787 if (symbol_conf.use_callchain && sample->callchain) {
2788 cursor = get_tls_callchain_cursor();
2789 if (thread__resolve_callchain(al.thread, cursor, evsel,
2790 sample, NULL, NULL,
2791 scripting_max_stack)) {
2792 pr_info("cannot resolve deferred callchains\n");
2793 cursor = NULL;
2794 }
2795 }
2796
2797 fputc(cursor ? '\n' : ' ', fp);
2798 sample__fprintf_sym(sample, &al, 0, output[type].print_ip_opts,
2799 cursor, symbol_conf.bt_stop_list, fp);
2800 }
2801
2802 fprintf(fp, "\n");
2803
2804 if (verbose > 0)
2805 fflush(fp);
2806
2807 out_put:
2808 addr_location__exit(&al);
2809 return ret;
2810 }
2811
2812 // Used when scr->per_event_dump is not set
2813 static struct evsel_script es_stdout;
2814
2815 static int process_attr(const struct perf_tool *tool, union perf_event *event,
2816 struct evlist **pevlist)
2817 {
2818 struct perf_script *scr = container_of(tool, struct perf_script, tool);
2819 struct evlist *evlist;
2820 struct evsel *evsel, *pos;
2821 uint16_t e_machine;
2822 u64 sample_type;
2823 int err;
2824
2825 err = perf_event__process_attr(tool, event, pevlist);
2826 if (err)
2827 return err;
2828
2829 evlist = *pevlist;
2830 evsel = evlist__last(*pevlist);
2831
2832 if (!evsel->priv) {
2833 if (scr->per_event_dump) {
2834 evsel->priv = evsel_script__new(evsel, scr->session->data);
2835 if (!evsel->priv)
2836 return -ENOMEM;
2837 } else { // Replicate what is done in perf_script__setup_per_event_dump()
2838 es_stdout.fp = stdout;
2839 evsel->priv = &es_stdout;
2840 }
2841 }
2842
2843 if (evsel->core.attr.type >= PERF_TYPE_MAX &&
2844 evsel->core.attr.type != PERF_TYPE_SYNTH)
2845 return 0;
2846
2847 evlist__for_each_entry(evlist, pos) {
2848 if (pos->core.attr.type == evsel->core.attr.type && pos != evsel)
2849 return 0;
2850 }
2851
2852 if (evsel->core.attr.sample_type) {
2853 err = evsel__check_attr(evsel, scr->session);
2854 if (err)
2855 return err;
2856 }
2857
2858 /*
2859 * Check if we need to enable callchains based
2860 	 * on the event's sample_type.
2861 */
2862 sample_type = evlist__combined_sample_type(evlist);
2863 e_machine = perf_session__e_machine(evsel__session(evsel), /*e_flags=*/NULL);
2864 callchain_param_setup(sample_type, e_machine);
2865
2866 /* Enable fields for callchain entries */
2867 if (symbol_conf.use_callchain &&
2868 (sample_type & PERF_SAMPLE_CALLCHAIN ||
2869 sample_type & PERF_SAMPLE_BRANCH_STACK ||
2870 (sample_type & PERF_SAMPLE_REGS_USER &&
2871 sample_type & PERF_SAMPLE_STACK_USER))) {
2872 int type = evsel__output_type(evsel);
2873
2874 if (!(output[type].user_unset_fields & PERF_OUTPUT_IP))
2875 output[type].fields |= PERF_OUTPUT_IP;
2876 if (!(output[type].user_unset_fields & PERF_OUTPUT_SYM))
2877 output[type].fields |= PERF_OUTPUT_SYM;
2878 }
2879 evsel__set_print_ip_opts(evsel);
2880 return 0;
2881 }
2882
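/*
 * Print a side-band event, filling in time/pid/tid for events recorded
 * without sample_id_all so that the normal sample prefix can be printed.
 */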
2883 static int print_event_with_time(const struct perf_tool *tool,
2884 union perf_event *event,
2885 struct perf_sample *sample,
2886 struct machine *machine,
2887 pid_t pid, pid_t tid, u64 timestamp)
2888 {
2889 struct perf_script *script = container_of(tool, struct perf_script, tool);
2890 struct perf_session *session = script->session;
2891 struct evsel *evsel = evlist__id2evsel(session->evlist, sample->id);
2892 struct thread *thread = NULL;
2893
2894 if (evsel && !evsel->core.attr.sample_id_all) {
2895 sample->cpu = 0;
2896 sample->time = timestamp;
2897 sample->pid = pid;
2898 sample->tid = tid;
2899 }
2900
2901 if (filter_cpu(sample))
2902 return 0;
2903
2904 if (tid != -1)
2905 thread = machine__findnew_thread(machine, pid, tid);
2906
2907 if (evsel) {
2908 perf_sample__fprintf_start(script, sample, thread, evsel,
2909 event->header.type, stdout);
2910 }
2911
2912 perf_event__fprintf(event, machine, stdout);
2913
2914 thread__put(thread);
2915
2916 return 0;
2917 }
2918
2919 static int print_event(const struct perf_tool *tool, union perf_event *event,
2920 struct perf_sample *sample, struct machine *machine,
2921 pid_t pid, pid_t tid)
2922 {
2923 return print_event_with_time(tool, event, sample, machine, pid, tid, 0);
2924 }
2925
2926 static int process_comm_event(const struct perf_tool *tool,
2927 union perf_event *event,
2928 struct perf_sample *sample,
2929 struct machine *machine)
2930 {
2931 if (perf_event__process_comm(tool, event, sample, machine) < 0)
2932 return -1;
2933
2934 return print_event(tool, event, sample, machine, event->comm.pid,
2935 event->comm.tid);
2936 }
2937
2938 static int process_namespaces_event(const struct perf_tool *tool,
2939 union perf_event *event,
2940 struct perf_sample *sample,
2941 struct machine *machine)
2942 {
2943 if (perf_event__process_namespaces(tool, event, sample, machine) < 0)
2944 return -1;
2945
2946 return print_event(tool, event, sample, machine, event->namespaces.pid,
2947 event->namespaces.tid);
2948 }
2949
2950 static int process_cgroup_event(const struct perf_tool *tool,
2951 union perf_event *event,
2952 struct perf_sample *sample,
2953 struct machine *machine)
2954 {
2955 if (perf_event__process_cgroup(tool, event, sample, machine) < 0)
2956 return -1;
2957
2958 return print_event(tool, event, sample, machine, sample->pid,
2959 sample->tid);
2960 }
2961
2962 static int process_fork_event(const struct perf_tool *tool,
2963 union perf_event *event,
2964 struct perf_sample *sample,
2965 struct machine *machine)
2966 {
2967 if (perf_event__process_fork(tool, event, sample, machine) < 0)
2968 return -1;
2969
2970 return print_event_with_time(tool, event, sample, machine,
2971 event->fork.pid, event->fork.tid,
2972 event->fork.time);
2973 }

2974 static int process_exit_event(const struct perf_tool *tool,
2975 union perf_event *event,
2976 struct perf_sample *sample,
2977 struct machine *machine)
2978 {
2979 /* Print before 'exit' deletes anything */
2980 if (print_event_with_time(tool, event, sample, machine, event->fork.pid,
2981 event->fork.tid, event->fork.time))
2982 return -1;
2983
2984 return perf_event__process_exit(tool, event, sample, machine);
2985 }
2986
2987 static int process_mmap_event(const struct perf_tool *tool,
2988 union perf_event *event,
2989 struct perf_sample *sample,
2990 struct machine *machine)
2991 {
2992 if (perf_event__process_mmap(tool, event, sample, machine) < 0)
2993 return -1;
2994
2995 return print_event(tool, event, sample, machine, event->mmap.pid,
2996 event->mmap.tid);
2997 }
2998
2999 static int process_mmap2_event(const struct perf_tool *tool,
3000 union perf_event *event,
3001 struct perf_sample *sample,
3002 struct machine *machine)
3003 {
3004 if (perf_event__process_mmap2(tool, event, sample, machine) < 0)
3005 return -1;
3006
3007 return print_event(tool, event, sample, machine, event->mmap2.pid,
3008 event->mmap2.tid);
3009 }
3010
3011 static int process_switch_event(const struct perf_tool *tool,
3012 union perf_event *event,
3013 struct perf_sample *sample,
3014 struct machine *machine)
3015 {
3016 struct perf_script *script = container_of(tool, struct perf_script, tool);
3017
3018 if (perf_event__process_switch(tool, event, sample, machine) < 0)
3019 return -1;
3020
3021 if (scripting_ops && scripting_ops->process_switch && !filter_cpu(sample))
3022 scripting_ops->process_switch(event, sample, machine);
3023
3024 if (!script->show_switch_events)
3025 return 0;
3026
3027 return print_event(tool, event, sample, machine, sample->pid,
3028 sample->tid);
3029 }
3030
3031 static int process_auxtrace_error(const struct perf_tool *tool,
3032 struct perf_session *session,
3033 union perf_event *event)
3034 {
3035 if (scripting_ops && scripting_ops->process_auxtrace_error) {
3036 scripting_ops->process_auxtrace_error(session, event);
3037 return 0;
3038 }
3039
3040 return perf_event__process_auxtrace_error(tool, session, event);
3041 }
3042
3043 static int
3044 process_lost_event(const struct perf_tool *tool,
3045 union perf_event *event,
3046 struct perf_sample *sample,
3047 struct machine *machine)
3048 {
3049 return print_event(tool, event, sample, machine, sample->pid,
3050 sample->tid);
3051 }
3052
3053 static int
3054 process_throttle_event(const struct perf_tool *tool __maybe_unused,
3055 union perf_event *event,
3056 struct perf_sample *sample,
3057 struct machine *machine)
3058 {
3059 if (scripting_ops && scripting_ops->process_throttle)
3060 scripting_ops->process_throttle(event, sample, machine);
3061 return 0;
3062 }
3063
3064 static int
3065 process_finished_round_event(const struct perf_tool *tool __maybe_unused,
3066 union perf_event *event,
3067 struct ordered_events *oe __maybe_unused)
3068
3069 {
3070 perf_event__fprintf(event, NULL, stdout);
3071 return 0;
3072 }
3073
3074 static int
3075 process_bpf_events(const struct perf_tool *tool __maybe_unused,
3076 union perf_event *event,
3077 struct perf_sample *sample,
3078 struct machine *machine)
3079 {
3080 if (machine__process_ksymbol(machine, event, sample) < 0)
3081 return -1;
3082
3083 return print_event(tool, event, sample, machine, sample->pid,
3084 sample->tid);
3085 }
3086
3087 static int
3088 process_bpf_metadata_event(const struct perf_tool *tool __maybe_unused,
3089 struct perf_session *session __maybe_unused,
3090 union perf_event *event)
3091 {
3092 perf_event__fprintf(event, NULL, stdout);
3093 return 0;
3094 }
3095
3096 static int process_text_poke_events(const struct perf_tool *tool,
3097 union perf_event *event,
3098 struct perf_sample *sample,
3099 struct machine *machine)
3100 {
3101 if (perf_event__process_text_poke(tool, event, sample, machine) < 0)
3102 return -1;
3103
3104 return print_event(tool, event, sample, machine, sample->pid,
3105 sample->tid);
3106 }
3107
3108 static void sig_handler(int sig __maybe_unused)
3109 {
3110 session_done = 1;
3111 }
3112
3113 static void perf_script__fclose_per_event_dump(struct perf_script *script)
3114 {
3115 struct evlist *evlist = script->session->evlist;
3116 struct evsel *evsel;
3117
3118 evlist__for_each_entry(evlist, evsel) {
3119 if (!evsel->priv)
3120 break;
3121 evsel_script__delete(evsel->priv);
3122 evsel->priv = NULL;
3123 }
3124 }
3125
3126 static int perf_script__fopen_per_event_dump(struct perf_script *script)
3127 {
3128 struct evsel *evsel;
3129
3130 evlist__for_each_entry(script->session->evlist, evsel) {
3131 /*
3132 		 * Already set up? I.e. we may be called twice in cases like
3133 		 * Intel PT: once for the intel_pt// and dummy events, then
3134 		 * again for the evsels synthesized from the auxtrace info.
3135 		 *
3136 		 * See perf_script__process_auxtrace_info.
3137 */
3138 if (evsel->priv != NULL)
3139 continue;
3140
3141 evsel->priv = evsel_script__new(evsel, script->session->data);
3142 if (evsel->priv == NULL)
3143 goto out_err_fclose;
3144 }
3145
3146 return 0;
3147
3148 out_err_fclose:
3149 perf_script__fclose_per_event_dump(script);
3150 return -1;
3151 }
3152
3153 static int perf_script__setup_per_event_dump(struct perf_script *script)
3154 {
3155 struct evsel *evsel;
3156
3157 if (script->per_event_dump)
3158 return perf_script__fopen_per_event_dump(script);
3159
3160 es_stdout.fp = stdout;
3161
3162 evlist__for_each_entry(script->session->evlist, evsel)
3163 evsel->priv = &es_stdout;
3164
3165 return 0;
3166 }
3167
3168 static void perf_script__exit_per_event_dump_stats(struct perf_script *script)
3169 {
3170 struct evsel *evsel;
3171
3172 evlist__for_each_entry(script->session->evlist, evsel) {
3173 struct evsel_script *es = evsel->priv;
3174
3175 evsel_script__fprintf(es, stdout);
3176 evsel_script__delete(es);
3177 evsel->priv = NULL;
3178 }
3179 }
3180
3181 static void perf_script__exit(struct perf_script *script)
3182 {
3183 perf_thread_map__put(script->threads);
3184 perf_cpu_map__put(script->cpus);
3185 }
3186
3187 static int __cmd_script(struct perf_script *script)
3188 {
3189 int ret;
3190
3191 signal(SIGINT, sig_handler);
3192
3193 /* override event processing functions */
3194 if (script->show_task_events) {
3195 script->tool.comm = process_comm_event;
3196 script->tool.fork = process_fork_event;
3197 script->tool.exit = process_exit_event;
3198 }
3199 if (script->show_mmap_events) {
3200 script->tool.mmap = process_mmap_event;
3201 script->tool.mmap2 = process_mmap2_event;
3202 }
3203 if (script->show_switch_events || (scripting_ops && scripting_ops->process_switch))
3204 script->tool.context_switch = process_switch_event;
3205 if (scripting_ops && scripting_ops->process_auxtrace_error)
3206 script->tool.auxtrace_error = process_auxtrace_error;
3207 if (script->show_namespace_events)
3208 script->tool.namespaces = process_namespaces_event;
3209 if (script->show_cgroup_events)
3210 script->tool.cgroup = process_cgroup_event;
3211 if (script->show_lost_events)
3212 script->tool.lost = process_lost_event;
3213 if (script->show_round_events) {
3214 script->tool.ordered_events = false;
3215 script->tool.finished_round = process_finished_round_event;
3216 }
3217 if (script->show_bpf_events) {
3218 script->tool.ksymbol = process_bpf_events;
3219 script->tool.bpf = process_bpf_events;
3220 script->tool.bpf_metadata = process_bpf_metadata_event;
3221 }
3222 if (script->show_text_poke_events) {
3223 script->tool.ksymbol = process_bpf_events;
3224 script->tool.text_poke = process_text_poke_events;
3225 }
3226
3227 if (perf_script__setup_per_event_dump(script)) {
3228 pr_err("Couldn't create the per event dump files\n");
3229 return -1;
3230 }
3231
3232 ret = perf_session__process_events(script->session);
3233
3234 if (script->per_event_dump)
3235 perf_script__exit_per_event_dump_stats(script);
3236
3237 if (debug_mode)
3238 pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
3239
3240 return ret;
3241 }
3242
3243 static int list_available_languages_cb(struct scripting_ops *ops, const char *spec)
3244 {
3245 fprintf(stderr, " %-42s [%s]\n", spec, ops->name);
3246 return 0;
3247 }
3248
3249 static void list_available_languages(void)
3250 {
3251 fprintf(stderr, "\n");
3252 fprintf(stderr, "Scripting language extensions (used in "
3253 "perf script -s [spec:]script.[spec]):\n\n");
3254 script_spec__for_each(&list_available_languages_cb);
3255 fprintf(stderr, "\n");
3256 }
3257
3258 /* Find script file relative to current directory or exec path */
3259 static char *find_script(const char *script)
3260 {
3261 char path[PATH_MAX];
3262
3263 if (!scripting_ops) {
3264 const char *ext = strrchr(script, '.');
3265
3266 if (!ext)
3267 return NULL;
3268
3269 scripting_ops = script_spec__lookup(++ext);
3270 if (!scripting_ops)
3271 return NULL;
3272 }
3273
3274 if (access(script, R_OK)) {
3275 char *exec_path = get_argv_exec_path();
3276
3277 if (!exec_path)
3278 return NULL;
3279 snprintf(path, sizeof(path), "%s/scripts/%s/%s",
3280 exec_path, scripting_ops->dirname, script);
3281 free(exec_path);
3282 script = path;
3283 if (access(script, R_OK))
3284 return NULL;
3285 }
3286 return strdup(script);
3287 }
3288
3289 static int parse_scriptname(const struct option *opt __maybe_unused,
3290 const char *str, int unset __maybe_unused)
3291 {
3292 char spec[PATH_MAX];
3293 const char *script, *ext;
3294 int len;
3295
3296 if (strcmp(str, "lang") == 0) {
3297 list_available_languages();
3298 exit(0);
3299 }
3300
3301 script = strchr(str, ':');
3302 if (script) {
3303 len = script - str;
3304 if (len >= PATH_MAX) {
3305 fprintf(stderr, "invalid language specifier");
3306 return -1;
3307 }
3308 strncpy(spec, str, len);
3309 spec[len] = '\0';
3310 scripting_ops = script_spec__lookup(spec);
3311 if (!scripting_ops) {
3312 fprintf(stderr, "invalid language specifier");
3313 return -1;
3314 }
3315 script++;
3316 } else {
3317 script = str;
3318 ext = strrchr(script, '.');
3319 if (!ext) {
3320 fprintf(stderr, "invalid script extension");
3321 return -1;
3322 }
3323 scripting_ops = script_spec__lookup(++ext);
3324 if (!scripting_ops) {
3325 fprintf(stderr, "invalid script extension");
3326 return -1;
3327 }
3328 }
3329
3330 script_name = find_script(script);
3331 if (!script_name)
3332 script_name = strdup(script);
3333
3334 return 0;
3335 }
3336
3337 static int parse_output_fields(const struct option *opt __maybe_unused,
3338 const char *arg, int unset __maybe_unused)
3339 {
3340 char *tok, *strtok_saveptr = NULL;
3341 int i, imax = ARRAY_SIZE(all_output_options);
3342 int j;
3343 int rc = 0;
3344 char *str = strdup(arg);
3345 int type = -1;
3346 enum { DEFAULT, SET, ADD, REMOVE } change = DEFAULT;
3347
3348 if (!str)
3349 return -ENOMEM;
3350
3351 /* first word can state for which event type the user is specifying
3352 * the fields. If no type exists, the specified fields apply to all
3353 * event types found in the file minus the invalid fields for a type.
3354 */
3355 tok = strchr(str, ':');
3356 if (tok) {
3357 *tok = '\0';
3358 tok++;
3359 if (!strcmp(str, "hw"))
3360 type = PERF_TYPE_HARDWARE;
3361 else if (!strcmp(str, "sw"))
3362 type = PERF_TYPE_SOFTWARE;
3363 else if (!strcmp(str, "trace"))
3364 type = PERF_TYPE_TRACEPOINT;
3365 else if (!strcmp(str, "raw"))
3366 type = PERF_TYPE_RAW;
3367 else if (!strcmp(str, "break"))
3368 type = PERF_TYPE_BREAKPOINT;
3369 else if (!strcmp(str, "synth"))
3370 type = OUTPUT_TYPE_SYNTH;
3371 else {
3372 fprintf(stderr, "Invalid event type in field string.\n");
3373 rc = -EINVAL;
3374 goto out;
3375 }
3376
3377 if (output[type].user_set)
3378 pr_warning("Overriding previous field request for %s events.\n",
3379 event_type(type));
3380
3381 /* Don't override defaults for +- */
3382 if (strchr(tok, '+') || strchr(tok, '-'))
3383 goto parse;
3384
3385 output[type].fields = 0;
3386 output[type].user_set = true;
3387 output[type].wildcard_set = false;
3388
3389 } else {
3390 tok = str;
3391 if (strlen(str) == 0) {
3392 fprintf(stderr,
3393 "Cannot set fields to 'none' for all event types.\n");
3394 rc = -EINVAL;
3395 goto out;
3396 }
3397
3398 /* Don't override defaults for +- */
3399 if (strchr(str, '+') || strchr(str, '-'))
3400 goto parse;
3401
3402 if (output_set_by_user())
3403 pr_warning("Overriding previous field request for all events.\n");
3404
3405 for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
3406 output[j].fields = 0;
3407 output[j].user_set = true;
3408 output[j].wildcard_set = true;
3409 }
3410 }
3411
3412 parse:
3413 for (tok = strtok_r(tok, ",", &strtok_saveptr); tok; tok = strtok_r(NULL, ",", &strtok_saveptr)) {
3414 if (*tok == '+') {
3415 if (change == SET)
3416 goto out_badmix;
3417 change = ADD;
3418 tok++;
3419 } else if (*tok == '-') {
3420 if (change == SET)
3421 goto out_badmix;
3422 change = REMOVE;
3423 tok++;
3424 } else {
3425 if (change != SET && change != DEFAULT)
3426 goto out_badmix;
3427 change = SET;
3428 }
3429
3430 for (i = 0; i < imax; ++i) {
3431 if (strcmp(tok, all_output_options[i].str) == 0)
3432 break;
3433 }
3434 if (i == imax && strcmp(tok, "flags") == 0) {
3435 print_flags = change != REMOVE;
3436 continue;
3437 }
3438 if (i == imax) {
3439 fprintf(stderr, "Invalid field requested.\n");
3440 rc = -EINVAL;
3441 goto out;
3442 }
3443 #ifndef HAVE_LIBCAPSTONE_SUPPORT
3444 if (change != REMOVE && strcmp(tok, "disasm") == 0) {
3445 fprintf(stderr, "Field \"disasm\" requires perf to be built with libcapstone support.\n");
3446 rc = -EINVAL;
3447 goto out;
3448 }
3449 #endif
3450
3451 if (type == -1) {
3452 /* add user option to all events types for
3453 * which it is valid
3454 */
3455 for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
3456 if (output[j].invalid_fields & all_output_options[i].field) {
3457 pr_warning("\'%s\' not valid for %s events. Ignoring.\n",
3458 all_output_options[i].str, event_type(j));
3459 } else {
3460 if (change == REMOVE) {
3461 output[j].fields &= ~all_output_options[i].field;
3462 output[j].user_set_fields &= ~all_output_options[i].field;
3463 output[j].user_unset_fields |= all_output_options[i].field;
3464 } else {
3465 output[j].fields |= all_output_options[i].field;
3466 output[j].user_set_fields |= all_output_options[i].field;
3467 output[j].user_unset_fields &= ~all_output_options[i].field;
3468 }
3469 output[j].user_set = true;
3470 output[j].wildcard_set = true;
3471 }
3472 }
3473 } else {
3474 if (output[type].invalid_fields & all_output_options[i].field) {
3475 fprintf(stderr, "\'%s\' not valid for %s events.\n",
3476 all_output_options[i].str, event_type(type));
3477
3478 rc = -EINVAL;
3479 goto out;
3480 }
3481 if (change == REMOVE)
3482 output[type].fields &= ~all_output_options[i].field;
3483 else
3484 output[type].fields |= all_output_options[i].field;
3485 output[type].user_set = true;
3486 output[type].wildcard_set = true;
3487 }
3488 }
3489
3490 if (type >= 0) {
3491 if (output[type].fields == 0) {
3492 pr_debug("No fields requested for %s type. "
3493 "Events will not be displayed.\n", event_type(type));
3494 }
3495 }
3496 goto out;
3497
3498 out_badmix:
3499 fprintf(stderr, "Cannot mix +-field with overridden fields\n");
3500 rc = -EINVAL;
3501 out:
3502 free(str);
3503 return rc;
3504 }
3505
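/*
 * Iterate over the per-language subdirectories of the scripts directory,
 * skipping "." and "..".  DT_UNKNOWN entries are checked with is_directory()
 * for filesystems that do not fill in d_type.
 */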
3506 #define for_each_lang(scripts_path, scripts_dir, lang_dirent) \
3507 while ((lang_dirent = readdir(scripts_dir)) != NULL) \
3508 if ((lang_dirent->d_type == DT_DIR || \
3509 (lang_dirent->d_type == DT_UNKNOWN && \
3510 is_directory(scripts_path, lang_dirent))) && \
3511 (strcmp(lang_dirent->d_name, ".")) && \
3512 (strcmp(lang_dirent->d_name, "..")))
3513
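/*
 * Iterate over the script files in a language's bin directory, skipping
 * anything that turns out to be a directory.
 */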
3514 #define for_each_script(lang_path, lang_dir, script_dirent) \
3515 while ((script_dirent = readdir(lang_dir)) != NULL) \
3516 if (script_dirent->d_type != DT_DIR && \
3517 (script_dirent->d_type != DT_UNKNOWN || \
3518 !is_directory(lang_path, script_dirent)))
3519
3520
3521 #define RECORD_SUFFIX "-record"
3522 #define REPORT_SUFFIX "-report"
3523
3524 struct script_desc {
3525 struct list_head node;
3526 char *name;
3527 char *half_liner;
3528 char *args;
3529 };
3530
3531 static LIST_HEAD(script_descs);
3532
3533 static struct script_desc *script_desc__new(const char *name)
3534 {
3535 struct script_desc *s = zalloc(sizeof(*s));
3536
3537 if (s != NULL && name)
3538 s->name = strdup(name);
3539
3540 return s;
3541 }
3542
3543 static void script_desc__delete(struct script_desc *s)
3544 {
3545 zfree(&s->name);
3546 zfree(&s->half_liner);
3547 zfree(&s->args);
3548 free(s);
3549 }
3550
3551 static void script_desc__add(struct script_desc *s)
3552 {
3553 list_add_tail(&s->node, &script_descs);
3554 }
3555
3556 static struct script_desc *script_desc__find(const char *name)
3557 {
3558 struct script_desc *s;
3559
3560 list_for_each_entry(s, &script_descs, node)
3561 if (strcasecmp(s->name, name) == 0)
3562 return s;
3563 return NULL;
3564 }
3565
3566 static struct script_desc *script_desc__findnew(const char *name)
3567 {
3568 struct script_desc *s = script_desc__find(name);
3569
3570 if (s)
3571 return s;
3572
3573 s = script_desc__new(name);
3574 if (!s)
3575 return NULL;
3576
3577 script_desc__add(s);
3578
3579 return s;
3580 }
3581
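/*
 * If @str is longer than @suffix and ends with it, return a pointer to
 * where the suffix begins inside @str; otherwise return NULL.
 */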
3582 static const char *ends_with(const char *str, const char *suffix)
3583 {
3584 size_t suffix_len = strlen(suffix);
3585 const char *p = str;
3586
3587 if (strlen(str) > suffix_len) {
3588 p = str + strlen(str) - suffix_len;
3589 if (!strncmp(p, suffix, suffix_len))
3590 return p;
3591 }
3592
3593 return NULL;
3594 }
3595
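/*
 * Scan a script for "# description:" and "# args:" header comments and
 * store them in @desc.  Shebang lines ("#!...") are skipped.
 */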
3596 static int read_script_info(struct script_desc *desc, const char *filename)
3597 {
3598 char line[BUFSIZ], *p;
3599 FILE *fp;
3600
3601 fp = fopen(filename, "r");
3602 if (!fp)
3603 return -1;
3604
3605 while (fgets(line, sizeof(line), fp)) {
3606 p = skip_spaces(line);
3607 if (strlen(p) == 0)
3608 continue;
3609 if (*p != '#')
3610 continue;
3611 p++;
3612 if (strlen(p) && *p == '!')
3613 continue;
3614
3615 p = skip_spaces(p);
3616 if (strlen(p) && p[strlen(p) - 1] == '\n')
3617 p[strlen(p) - 1] = '\0';
3618
3619 if (!strncmp(p, "description:", strlen("description:"))) {
3620 p += strlen("description:");
3621 desc->half_liner = strdup(skip_spaces(p));
3622 continue;
3623 }
3624
3625 if (!strncmp(p, "args:", strlen("args:"))) {
3626 p += strlen("args:");
3627 desc->args = strdup(skip_spaces(p));
3628 continue;
3629 }
3630 }
3631
3632 fclose(fp);
3633
3634 return 0;
3635 }
3636
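/*
 * Return a newly allocated copy of the script file name with @suffix
 * chopped off, or NULL if the name does not end with @suffix.
 */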
3637 static char *get_script_root(struct dirent *script_dirent, const char *suffix)
3638 {
3639 char *script_root, *str;
3640
3641 script_root = strdup(script_dirent->d_name);
3642 if (!script_root)
3643 return NULL;
3644
3645 str = (char *)ends_with(script_root, suffix);
3646 if (!str) {
3647 free(script_root);
3648 return NULL;
3649 }
3650
3651 *str = '\0';
3652 return script_root;
3653 }
3654
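/*
 * Handler for 'perf script -l': walk <exec-path>/scripts/<lang>/bin,
 * collect every *-report script and print its name, arguments and
 * one-line description.  Exits instead of returning.
 */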
3655 static int list_available_scripts(const struct option *opt __maybe_unused,
3656 const char *s __maybe_unused,
3657 int unset __maybe_unused)
3658 {
3659 struct dirent *script_dirent, *lang_dirent;
3660 char *buf, *scripts_path, *script_path, *lang_path, *first_half;
3661 DIR *scripts_dir, *lang_dir;
3662 struct script_desc *desc;
3663 char *script_root;
3664
3665 buf = malloc(3 * MAXPATHLEN + BUFSIZ);
3666 if (!buf) {
3667 pr_err("malloc failed\n");
3668 exit(-1);
3669 }
3670 scripts_path = buf;
3671 script_path = buf + MAXPATHLEN;
3672 lang_path = buf + 2 * MAXPATHLEN;
3673 first_half = buf + 3 * MAXPATHLEN;
3674
3675 snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path());
3676
3677 scripts_dir = opendir(scripts_path);
3678 if (!scripts_dir) {
3679 fprintf(stdout,
3680 "open(%s) failed.\n"
3681 "Check \"PERF_EXEC_PATH\" env to set scripts dir.\n",
3682 scripts_path);
3683 free(buf);
3684 exit(-1);
3685 }
3686
3687 for_each_lang(scripts_path, scripts_dir, lang_dirent) {
3688 scnprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
3689 lang_dirent->d_name);
3690 lang_dir = opendir(lang_path);
3691 if (!lang_dir)
3692 continue;
3693
3694 for_each_script(lang_path, lang_dir, script_dirent) {
3695 script_root = get_script_root(script_dirent, REPORT_SUFFIX);
3696 if (script_root) {
3697 desc = script_desc__findnew(script_root);
3698 scnprintf(script_path, MAXPATHLEN, "%s/%s",
3699 lang_path, script_dirent->d_name);
3700 read_script_info(desc, script_path);
3701 free(script_root);
3702 }
3703 }
3704 }
3705
3706 fprintf(stdout, "List of available trace scripts:\n");
3707 list_for_each_entry(desc, &script_descs, node) {
3708 sprintf(first_half, "%s %s", desc->name,
3709 desc->args ? desc->args : "");
3710 fprintf(stdout, " %-36s %s\n", first_half,
3711 desc->half_liner ? desc->half_liner : "");
3712 }
3713
3714 free(buf);
3715 exit(0);
3716 }
3717
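/* Handler for --dlarg: append a copy of the argument to dlargv/dlargc. */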
3718 static int add_dlarg(const struct option *opt __maybe_unused,
3719 const char *s, int unset __maybe_unused)
3720 {
3721 char *arg = strdup(s);
3722 void *a;
3723
3724 if (!arg)
3725 return -1;
3726
3727 a = realloc(dlargv, sizeof(dlargv[0]) * (dlargc + 1));
3728 if (!a) {
3729 free(arg);
3730 return -1;
3731 }
3732
3733 dlargv = a;
3734 dlargv[dlargc++] = arg;
3735
3736 return 0;
3737 }
3738
3739 static void free_dlarg(void)
3740 {
3741 while (dlargc--)
3742 free(dlargv[dlargc]);
3743 free(dlargv);
3744 }
3745
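/*
 * Search <exec-path>/scripts/<lang>/bin for a script named
 * "<script_root><suffix>" and return a strdup()ed path to it, or NULL.
 */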
3746 static char *get_script_path(const char *script_root, const char *suffix)
3747 {
3748 struct dirent *script_dirent, *lang_dirent;
3749 char scripts_path[MAXPATHLEN];
3750 char script_path[MAXPATHLEN];
3751 DIR *scripts_dir, *lang_dir;
3752 char lang_path[MAXPATHLEN];
3753 char *__script_root;
3754
3755 snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path());
3756
3757 scripts_dir = opendir(scripts_path);
3758 if (!scripts_dir)
3759 return NULL;
3760
3761 for_each_lang(scripts_path, scripts_dir, lang_dirent) {
3762 scnprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
3763 lang_dirent->d_name);
3764 lang_dir = opendir(lang_path);
3765 if (!lang_dir)
3766 continue;
3767
3768 for_each_script(lang_path, lang_dir, script_dirent) {
3769 __script_root = get_script_root(script_dirent, suffix);
3770 if (__script_root && !strcmp(script_root, __script_root)) {
3771 free(__script_root);
3772 closedir(scripts_dir);
3773 scnprintf(script_path, MAXPATHLEN, "%s/%s",
3774 lang_path, script_dirent->d_name);
3775 closedir(lang_dir);
3776 return strdup(script_path);
3777 }
3778 free(__script_root);
3779 }
3780 closedir(lang_dir);
3781 }
3782 closedir(scripts_dir);
3783
3784 return NULL;
3785 }
3786
3787 static bool is_top_script(const char *script_path)
3788 {
3789 return ends_with(script_path, "top") != NULL;
3790 }
3791
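/*
 * Count the '<arg>' style placeholders in the script's "args:" line,
 * i.e. the number of arguments the report script requires.
 */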
3792 static int has_required_arg(char *script_path)
3793 {
3794 struct script_desc *desc;
3795 int n_args = 0;
3796 char *p;
3797
3798 desc = script_desc__new(NULL);
3799
3800 if (read_script_info(desc, script_path))
3801 goto out;
3802
3803 if (!desc->args)
3804 goto out;
3805
3806 for (p = desc->args; *p; p++)
3807 if (*p == '<')
3808 n_args++;
3809 out:
3810 script_desc__delete(desc);
3811
3812 return n_args;
3813 }
3814
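/*
 * Run @argv through the record option parser just to find out whether a
 * workload command was given; record system wide when none remains.
 */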
3815 static int have_cmd(int argc, const char **argv)
3816 {
3817 char **__argv = malloc(sizeof(const char *) * argc);
3818
3819 if (!__argv) {
3820 pr_err("malloc failed\n");
3821 return -1;
3822 }
3823
3824 memcpy(__argv, argv, sizeof(const char *) * argc);
3825 argc = parse_options(argc, (const char **)__argv, record_options,
3826 NULL, PARSE_OPT_STOP_AT_NON_OPTION);
3827 free(__argv);
3828
3829 system_wide = (argc == 0);
3830
3831 return 0;
3832 }
3833
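/*
 * Derive callchain parameters from the combined sample type of the
 * session's events.  --stitch-lbr only makes sense when callchains were
 * recorded with --call-graph lbr, so turn it off otherwise.
 */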
3834 static void script__setup_sample_type(struct perf_script *script)
3835 {
3836 struct perf_session *session = script->session;
3837 u64 sample_type = evlist__combined_sample_type(session->evlist);
3838
3839 callchain_param_setup(sample_type, perf_session__e_machine(session, /*e_flags=*/NULL));
3840
3841 if (script->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
3842 pr_warning("Can't find LBR callchain. Switching off --stitch-lbr.\n"
3843 "Please apply --call-graph lbr when recording.\n");
3844 script->stitch_lbr = false;
3845 }
3846 }
3847
3848 static int process_stat_round_event(const struct perf_tool *tool __maybe_unused,
3849 struct perf_session *session,
3850 union perf_event *event)
3851 {
3852 struct perf_record_stat_round *round = &event->stat_round;
3853 struct evsel *counter;
3854
3855 evlist__for_each_entry(session->evlist, counter) {
3856 perf_stat_process_counter(&stat_config, counter);
3857 process_stat(counter, round->time);
3858 }
3859
3860 process_stat_interval(round->time);
3861 return 0;
3862 }
3863
3864 static int process_stat_config_event(const struct perf_tool *tool __maybe_unused,
3865 struct perf_session *session __maybe_unused,
3866 union perf_event *event)
3867 {
3868 perf_event__read_stat_config(&stat_config, &event->stat_config);
3869
3870 /*
3871 * Aggregation modes are not used since post-processing scripts are
3872 * supposed to take care of such requirements
3873 */
3874 stat_config.aggr_mode = AGGR_NONE;
3875
3876 return 0;
3877 }
3878
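/*
 * Once both the cpu map and thread map events have been seen, attach
 * them to the evlist and allocate the per-counter stats.
 */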
3879 static int set_maps(struct perf_script *script)
3880 {
3881 struct evlist *evlist = script->session->evlist;
3882
3883 if (!script->cpus || !script->threads)
3884 return 0;
3885
3886 if (WARN_ONCE(script->allocated, "stats double allocation\n"))
3887 return -EINVAL;
3888
3889 perf_evlist__set_maps(&evlist->core, script->cpus, script->threads);
3890
3891 if (evlist__alloc_stats(&stat_config, evlist, /*alloc_raw=*/true))
3892 return -ENOMEM;
3893
3894 script->allocated = true;
3895 return 0;
3896 }
3897
3898 static
3899 int process_thread_map_event(const struct perf_tool *tool,
3900 struct perf_session *session __maybe_unused,
3901 union perf_event *event)
3902 {
3903 struct perf_script *script = container_of(tool, struct perf_script, tool);
3904
3905 if (dump_trace)
3906 perf_event__fprintf_thread_map(event, stdout);
3907
3908 if (script->threads) {
3909 pr_warning("Extra thread map event, ignoring.\n");
3910 return 0;
3911 }
3912
3913 script->threads = thread_map__new_event(&event->thread_map);
3914 if (!script->threads)
3915 return -ENOMEM;
3916
3917 return set_maps(script);
3918 }
3919
3920 static
3921 int process_cpu_map_event(const struct perf_tool *tool,
3922 struct perf_session *session __maybe_unused,
3923 union perf_event *event)
3924 {
3925 struct perf_script *script = container_of(tool, struct perf_script, tool);
3926
3927 if (dump_trace)
3928 perf_event__fprintf_cpu_map(event, stdout);
3929
3930 if (script->cpus) {
3931 pr_warning("Extra cpu map event, ignoring.\n");
3932 return 0;
3933 }
3934
3935 script->cpus = cpu_map__new_data(&event->cpu_map.data);
3936 if (!script->cpus)
3937 return -ENOMEM;
3938
3939 return set_maps(script);
3940 }
3941
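/* Only forward feature records that this perf binary knows about. */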
3942 static int process_feature_event(const struct perf_tool *tool __maybe_unused,
3943 struct perf_session *session,
3944 union perf_event *event)
3945 {
3946 if (event->feat.feat_id < HEADER_LAST_FEATURE)
3947 return perf_event__process_feature(session, event);
3948 return 0;
3949 }
3950
3951 static int perf_script__process_auxtrace_info(const struct perf_tool *tool,
3952 struct perf_session *session,
3953 union perf_event *event)
3954 {
3955 int ret = perf_event__process_auxtrace_info(tool, session, event);
3956
3957 if (ret == 0) {
3958 struct perf_script *script = container_of(tool, struct perf_script, tool);
3959
3960 ret = perf_script__setup_per_event_dump(script);
3961 }
3962
3963 return ret;
3964 }
3965
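/*
 * Handler for --insn-trace[=raw|disasm]: select the instruction output
 * fields, enable instruction-level itrace decoding and nanosecond
 * timestamps.
 */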
3966 static int parse_insn_trace(const struct option *opt __maybe_unused,
3967 const char *str, int unset __maybe_unused)
3968 {
3969 const char *fields = "+insn,-event,-period";
3970 int ret;
3971
3972 if (str) {
3973 if (strcmp(str, "disasm") == 0)
3974 fields = "+disasm,-event,-period";
3975 else if (strlen(str) != 0 && strcmp(str, "raw") != 0) {
3976 fprintf(stderr, "Only 'raw' or 'disasm' is accepted\n");
3977 return -EINVAL;
3978 }
3979 }
3980
3981 ret = parse_output_fields(NULL, fields, 0);
3982 if (ret < 0)
3983 return ret;
3984
3985 itrace_parse_synth_opts(opt, "i0nse", 0);
3986 symbol_conf.nanosecs = true;
3987 return 0;
3988 }
3989
3990 static int parse_xed(const struct option *opt __maybe_unused,
3991 const char *str __maybe_unused,
3992 int unset __maybe_unused)
3993 {
3994 if (isatty(1))
3995 force_pager("xed -F insn: -A -64 | less");
3996 else
3997 force_pager("xed -F insn: -A -64");
3998 return 0;
3999 }
4000
4001 static int parse_call_trace(const struct option *opt __maybe_unused,
4002 const char *str __maybe_unused,
4003 int unset __maybe_unused)
4004 {
4005 parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent", 0);
4006 itrace_parse_synth_opts(opt, "cewp", 0);
4007 symbol_conf.nanosecs = true;
4008 symbol_conf.pad_output_len_dso = 50;
4009 return 0;
4010 }
4011
4012 static int parse_callret_trace(const struct option *opt __maybe_unused,
4013 const char *str __maybe_unused,
4014 int unset __maybe_unused)
4015 {
4016 parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent,+flags", 0);
4017 itrace_parse_synth_opts(opt, "crewp", 0);
4018 symbol_conf.nanosecs = true;
4019 return 0;
4020 }
4021
4022 int cmd_script(int argc, const char **argv)
4023 {
4024 bool show_full_info = false;
4025 bool header = false;
4026 bool header_only = false;
4027 bool script_started = false;
4028 bool unsorted_dump = false;
4029 bool merge_deferred_callchains = true;
4030 char *rec_script_path = NULL;
4031 char *rep_script_path = NULL;
4032 struct perf_session *session;
4033 struct itrace_synth_opts itrace_synth_opts = {
4034 .set = false,
4035 .default_no_sample = true,
4036 };
4037 char *script_path = NULL;
4038 const char *dlfilter_file = NULL;
4039 const char **__argv;
4040 int i, j, err = 0;
4041 struct perf_script script = {};
4042 struct perf_data data = {
4043 .mode = PERF_DATA_MODE_READ,
4044 };
4045 const struct option options[] = {
4046 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
4047 "dump raw trace in ASCII"),
4048 OPT_BOOLEAN(0, "dump-unsorted-raw-trace", &unsorted_dump,
4049 "dump unsorted raw trace in ASCII"),
4050 OPT_INCR('v', "verbose", &verbose,
4051 "be more verbose (show symbol address, etc)"),
4052 OPT_BOOLEAN('L', "Latency", &latency_format,
4053 "show latency attributes (irqs/preemption disabled, etc)"),
4054 OPT_CALLBACK_NOOPT('l', "list", NULL, NULL, "list available scripts",
4055 list_available_scripts),
4056 OPT_CALLBACK_NOOPT(0, "list-dlfilters", NULL, NULL, "list available dlfilters",
4057 list_available_dlfilters),
4058 OPT_CALLBACK('s', "script", NULL, "name",
4059 "script file name (lang:script name, script name, or *)",
4060 parse_scriptname),
4061 OPT_STRING('g', "gen-script", &generate_script_lang, "lang",
4062 "generate perf-script.xx script in specified language"),
4063 OPT_STRING(0, "dlfilter", &dlfilter_file, "file", "filter .so file name"),
4064 OPT_CALLBACK(0, "dlarg", NULL, "argument", "filter argument",
4065 add_dlarg),
4066 OPT_STRING('i', "input", &input_name, "file", "input file name"),
4067 OPT_BOOLEAN('d', "debug-mode", &debug_mode,
4068 "do various checks like samples ordering and lost events"),
4069 OPT_BOOLEAN(0, "header", &header, "Show data header."),
4070 OPT_BOOLEAN(0, "header-only", &header_only, "Show only data header."),
4071 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
4072 "file", "vmlinux pathname"),
4073 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
4074 "file", "kallsyms pathname"),
4075 OPT_BOOLEAN('G', "hide-call-graph", &no_callchain,
4076 "When printing symbols do not display call chain"),
4077 OPT_CALLBACK(0, "symfs", NULL, "directory",
4078 "Look for files with symbols relative to this directory",
4079 symbol__config_symfs),
4080 OPT_CALLBACK('F', "fields", NULL, "str",
4081 "comma-separated output fields, optionally prepended with 'type:'. "
4082 "+field to add and -field to remove. "
4083 "Valid types: hw,sw,trace,raw,synth. "
4084 "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,dsoff,"
4085 "addr,symoff,srcline,period,iregs,uregs,brstack,"
4086 "brstacksym,flags,data_src,weight,bpf-output,brstackinsn,"
4087 "brstackinsnlen,brstackdisasm,brstackoff,callindent,insn,disasm,insnlen,synth,"
4088 "phys_addr,metric,misc,srccode,ipc,tod,data_page_size,"
4089 "code_page_size,ins_lat,machine_pid,vcpu,cgroup,retire_lat,"
4090 "brcntr",
4091 parse_output_fields),
4092 OPT_BOOLEAN('a', "all-cpus", &system_wide,
4093 "system-wide collection from all CPUs"),
4094 OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
4095 "only consider symbols in these DSOs"),
4096 OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
4097 "only consider these symbols"),
4098 OPT_INTEGER(0, "addr-range", &symbol_conf.addr_range,
4099 "Use with -S to list traced records within address range"),
4100 OPT_CALLBACK_OPTARG(0, "insn-trace", &itrace_synth_opts, NULL, "raw|disasm",
4101 "Decode instructions from itrace", parse_insn_trace),
4102 OPT_CALLBACK_OPTARG(0, "xed", NULL, NULL, NULL,
4103 "Run xed disassembler on output", parse_xed),
4104 OPT_CALLBACK_OPTARG(0, "call-trace", &itrace_synth_opts, NULL, NULL,
4105 "Decode calls from itrace", parse_call_trace),
4106 OPT_CALLBACK_OPTARG(0, "call-ret-trace", &itrace_synth_opts, NULL, NULL,
4107 "Decode calls and returns from itrace", parse_callret_trace),
4108 OPT_STRING(0, "graph-function", &symbol_conf.graph_function, "symbol[,symbol...]",
4109 "Only print symbols and callees with --call-trace/--call-ret-trace"),
4110 OPT_STRING(0, "stop-bt", &symbol_conf.bt_stop_list_str, "symbol[,symbol...]",
4111 "Stop display of callgraph at these symbols"),
4112 OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
4113 OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
4114 "only display events for these comms"),
4115 OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
4116 "only consider symbols in these pids"),
4117 OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
4118 "only consider symbols in these tids"),
4119 OPT_UINTEGER(0, "max-stack", &scripting_max_stack,
4120 "Set the maximum stack depth when parsing the callchain, "
4121 "anything beyond the specified depth will be ignored. "
4122 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
4123 OPT_BOOLEAN(0, "reltime", &reltime, "Show time stamps relative to start"),
4124 OPT_BOOLEAN(0, "deltatime", &deltatime, "Show time stamps relative to previous event"),
4125 OPT_BOOLEAN('I', "show-info", &show_full_info,
4126 "display extended information from perf.data file"),
4127 OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path,
4128 "Show the path of [kernel.kallsyms]"),
4129 OPT_BOOLEAN('\0', "show-task-events", &script.show_task_events,
4130 "Show the fork/comm/exit events"),
4131 OPT_BOOLEAN('\0', "show-mmap-events", &script.show_mmap_events,
4132 "Show the mmap events"),
4133 OPT_BOOLEAN('\0', "show-switch-events", &script.show_switch_events,
4134 "Show context switch events (if recorded)"),
4135 OPT_BOOLEAN('\0', "show-namespace-events", &script.show_namespace_events,
4136 "Show namespace events (if recorded)"),
4137 OPT_BOOLEAN('\0', "show-cgroup-events", &script.show_cgroup_events,
4138 "Show cgroup events (if recorded)"),
4139 OPT_BOOLEAN('\0', "show-lost-events", &script.show_lost_events,
4140 "Show lost events (if recorded)"),
4141 OPT_BOOLEAN('\0', "show-round-events", &script.show_round_events,
4142 "Show round events (if recorded)"),
4143 OPT_BOOLEAN('\0', "show-bpf-events", &script.show_bpf_events,
4144 "Show bpf related events (if recorded)"),
4145 OPT_BOOLEAN('\0', "show-text-poke-events", &script.show_text_poke_events,
4146 "Show text poke related events (if recorded)"),
4147 OPT_BOOLEAN('\0', "per-event-dump", &script.per_event_dump,
4148 "Dump trace output to files named by the monitored events"),
4149 OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
4150 OPT_INTEGER(0, "max-blocks", &max_blocks,
4151 "Maximum number of code blocks to dump with brstackinsn"),
4152 OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs,
4153 "Use 9 decimal places when displaying time"),
4154 OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
4155 "Instruction Tracing options\n" ITRACE_HELP,
4156 itrace_parse_synth_opts),
4157 OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
4158 "Show full source file name path for source lines"),
4159 OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
4160 "Enable symbol demangling"),
4161 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
4162 "Enable kernel symbol demangling"),
4163 OPT_STRING(0, "addr2line", &symbol_conf.addr2line_path, "path",
4164 "addr2line binary to use for line numbers"),
4165 OPT_STRING(0, "time", &script.time_str, "str",
4166 "Time span of interest (start,stop)"),
4167 OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
4168 "Show inline function"),
4169 OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
4170 "guest mount directory under which every guest os"
4171 " instance has a subdir"),
4172 OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name,
4173 "file", "file saving guest os vmlinux"),
4174 OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms,
4175 "file", "file saving guest os /proc/kallsyms"),
4176 OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
4177 "file", "file saving guest os /proc/modules"),
4178 OPT_BOOLEAN(0, "guest-code", &symbol_conf.guest_code,
4179 "Guest code can be found in hypervisor process"),
4180 OPT_BOOLEAN('\0', "stitch-lbr", &script.stitch_lbr,
4181 "Enable LBR callgraph stitching approach"),
4182 OPT_BOOLEAN('\0', "merge-callchains", &merge_deferred_callchains,
4183 "Enable merge deferred user callchains"),
4184 OPTS_EVSWITCH(&script.evswitch),
4185 OPT_END()
4186 };
4187 const char * const script_subcommands[] = { "record", "report", NULL };
4188 const char *script_usage[] = {
4189 "perf script [<options>]",
4190 "perf script [<options>] record <script> [<record-options>] <command>",
4191 "perf script [<options>] report <script> [script-args]",
4192 "perf script [<options>] <script> [<record-options>] <command>",
4193 "perf script [<options>] <top-script> [script-args]",
4194 NULL
4195 };
4196 struct perf_env *env;
4197
4198 perf_set_singlethreaded();
4199
4200 setup_scripting();
4201
4202 argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
4203 PARSE_OPT_STOP_AT_NON_OPTION);
4204
4205 if (symbol_conf.guestmount ||
4206 symbol_conf.default_guest_vmlinux_name ||
4207 symbol_conf.default_guest_kallsyms ||
4208 symbol_conf.default_guest_modules ||
4209 symbol_conf.guest_code) {
4210 /*
4211 * Enable guest sample processing.
4212 */
4213 perf_guest = true;
4214 }
4215
4216 data.path = input_name;
4217 data.force = symbol_conf.force;
4218
4219 if (unsorted_dump)
4220 dump_trace = true;
4221
4222 if (symbol__validate_sym_arguments())
4223 return -1;
4224
4225 if (argc > 1 && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
4226 rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
4227 if (!rec_script_path)
4228 return cmd_record(argc, argv);
4229 }
4230
4231 if (argc > 1 && strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
4232 rep_script_path = get_script_path(argv[1], REPORT_SUFFIX);
4233 if (!rep_script_path) {
4234 fprintf(stderr,
4235 "Please specify a valid report script "
4236 "(see 'perf script -l' for listing)\n");
4237 return -1;
4238 }
4239 }
4240
4241 if (reltime && deltatime) {
4242 fprintf(stderr,
4243 "--reltime and --deltatime are mutually exclusive. "
4244 "Please use only one of them.\n");
4245 return -1;
4246 }
4247
4248 if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) &&
4249 itrace_synth_opts.callchain_sz > scripting_max_stack)
4250 scripting_max_stack = itrace_synth_opts.callchain_sz;
4251
4252 /* make sure PERF_EXEC_PATH is set for scripts */
4253 set_argv_exec_path(get_argv_exec_path());
4254
4255 if (argc && !script_name && !rec_script_path && !rep_script_path) {
4256 int live_pipe[2];
4257 int rep_args;
4258 pid_t pid;
4259
4260 rec_script_path = get_script_path(argv[0], RECORD_SUFFIX);
4261 rep_script_path = get_script_path(argv[0], REPORT_SUFFIX);
4262
4263 if (!rec_script_path && !rep_script_path) {
4264 script_name = find_script(argv[0]);
4265 if (script_name) {
4266 argc -= 1;
4267 argv += 1;
4268 goto script_found;
4269 }
4270 usage_with_options_msg(script_usage, options,
4271 "Couldn't find script `%s'\n\n See perf"
4272 " script -l for available scripts.\n", argv[0]);
4273 }
4274
4275 if (is_top_script(argv[0])) {
4276 rep_args = argc - 1;
4277 } else {
4278 int rec_args;
4279
4280 rep_args = has_required_arg(rep_script_path);
4281 rec_args = (argc - 1) - rep_args;
4282 if (rec_args < 0) {
4283 usage_with_options_msg(script_usage, options,
4284 "`%s' script requires options."
4285 "\n\n See perf script -l for available "
4286 "scripts and options.\n", argv[0]);
4287 }
4288 }
4289
4290 if (pipe(live_pipe) < 0) {
4291 perror("failed to create pipe");
4292 return -1;
4293 }
4294
4295 pid = fork();
4296 if (pid < 0) {
4297 perror("failed to fork");
4298 return -1;
4299 }
4300
4301 if (!pid) {
4302 j = 0;
4303
4304 dup2(live_pipe[1], 1);
4305 close(live_pipe[0]);
4306
4307 if (is_top_script(argv[0])) {
4308 system_wide = true;
4309 } else if (!system_wide) {
4310 if (have_cmd(argc - rep_args, &argv[rep_args]) != 0) {
4311 err = -1;
4312 goto out;
4313 }
4314 }
4315
4316 __argv = malloc((argc + 6) * sizeof(const char *));
4317 if (!__argv) {
4318 pr_err("malloc failed\n");
4319 err = -ENOMEM;
4320 goto out;
4321 }
4322
4323 __argv[j++] = "/bin/sh";
4324 __argv[j++] = rec_script_path;
4325 if (system_wide)
4326 __argv[j++] = "-a";
4327 __argv[j++] = "-q";
4328 __argv[j++] = "-o";
4329 __argv[j++] = "-";
4330 for (i = rep_args + 1; i < argc; i++)
4331 __argv[j++] = argv[i];
4332 __argv[j++] = NULL;
4333
4334 execvp("/bin/sh", (char **)__argv);
4335 free(__argv);
4336 exit(-1);
4337 }
4338
4339 dup2(live_pipe[0], 0);
4340 close(live_pipe[1]);
4341
4342 __argv = malloc((argc + 4) * sizeof(const char *));
4343 if (!__argv) {
4344 pr_err("malloc failed\n");
4345 err = -ENOMEM;
4346 goto out;
4347 }
4348
4349 j = 0;
4350 __argv[j++] = "/bin/sh";
4351 __argv[j++] = rep_script_path;
4352 for (i = 1; i < rep_args + 1; i++)
4353 __argv[j++] = argv[i];
4354 __argv[j++] = "-i";
4355 __argv[j++] = "-";
4356 __argv[j++] = NULL;
4357
4358 execvp("/bin/sh", (char **)__argv);
4359 free(__argv);
4360 exit(-1);
4361 }
4362 script_found:
4363 if (rec_script_path)
4364 script_path = rec_script_path;
4365 if (rep_script_path)
4366 script_path = rep_script_path;
4367
4368 if (script_path) {
4369 j = 0;
4370
4371 if (!rec_script_path)
4372 system_wide = false;
4373 else if (!system_wide) {
4374 if (have_cmd(argc - 1, &argv[1]) != 0) {
4375 err = -1;
4376 goto out;
4377 }
4378 }
4379
4380 __argv = malloc((argc + 2) * sizeof(const char *));
4381 if (!__argv) {
4382 pr_err("malloc failed\n");
4383 err = -ENOMEM;
4384 goto out;
4385 }
4386
4387 __argv[j++] = "/bin/sh";
4388 __argv[j++] = script_path;
4389 if (system_wide)
4390 __argv[j++] = "-a";
4391 for (i = 2; i < argc; i++)
4392 __argv[j++] = argv[i];
4393 __argv[j++] = NULL;
4394
4395 execvp("/bin/sh", (char **)__argv);
4396 free(__argv);
4397 exit(-1);
4398 }
4399
4400 if (dlfilter_file) {
4401 dlfilter = dlfilter__new(dlfilter_file, dlargc, dlargv);
4402 if (!dlfilter)
4403 return -1;
4404 }
4405
4406 if (!script_name) {
4407 setup_pager();
4408 use_browser = 0;
4409 }
4410
4411 perf_tool__init(&script.tool, !unsorted_dump);
4412 script.tool.sample = process_sample_event;
4413 script.tool.callchain_deferred = process_deferred_sample_event;
4414 script.tool.mmap = perf_event__process_mmap;
4415 script.tool.mmap2 = perf_event__process_mmap2;
4416 script.tool.comm = perf_event__process_comm;
4417 script.tool.namespaces = perf_event__process_namespaces;
4418 script.tool.cgroup = perf_event__process_cgroup;
4419 script.tool.exit = perf_event__process_exit;
4420 script.tool.fork = perf_event__process_fork;
4421 script.tool.attr = process_attr;
4422 script.tool.event_update = perf_event__process_event_update;
4423 #ifdef HAVE_LIBTRACEEVENT
4424 script.tool.tracing_data = perf_event__process_tracing_data;
4425 #endif
4426 script.tool.feature = process_feature_event;
4427 script.tool.build_id = perf_event__process_build_id;
4428 script.tool.id_index = perf_event__process_id_index;
4429 script.tool.auxtrace_info = perf_script__process_auxtrace_info;
4430 script.tool.auxtrace = perf_event__process_auxtrace;
4431 script.tool.auxtrace_error = perf_event__process_auxtrace_error;
4432 script.tool.stat = perf_event__process_stat_event;
4433 script.tool.stat_round = process_stat_round_event;
4434 script.tool.stat_config = process_stat_config_event;
4435 script.tool.thread_map = process_thread_map_event;
4436 script.tool.cpu_map = process_cpu_map_event;
4437 script.tool.throttle = process_throttle_event;
4438 script.tool.unthrottle = process_throttle_event;
4439 script.tool.ordering_requires_timestamps = true;
4440 script.tool.merge_deferred_callchains = merge_deferred_callchains;
4441 session = perf_session__new(&data, &script.tool);
4442 if (IS_ERR(session))
4443 return PTR_ERR(session);
4444
4445 env = perf_session__env(session);
4446 if (header || header_only) {
4447 script.tool.show_feat_hdr = SHOW_FEAT_HEADER;
4448 perf_session__fprintf_info(session, stdout, show_full_info);
4449 if (header_only)
4450 goto out_delete;
4451 }
4452 if (show_full_info)
4453 script.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO;
4454
4455 if (symbol__init(env) < 0)
4456 goto out_delete;
4457
4458 script.session = session;
4459 script__setup_sample_type(&script);
4460
4461 if ((output[PERF_TYPE_HARDWARE].fields & PERF_OUTPUT_CALLINDENT) ||
4462 symbol_conf.graph_function)
4463 itrace_synth_opts.thread_stack = true;
4464
4465 session->itrace_synth_opts = &itrace_synth_opts;
4466
4467 if (cpu_list) {
4468 err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
4469 if (err < 0)
4470 goto out_delete;
4471 itrace_synth_opts.cpu_bitmap = cpu_bitmap;
4472 }
4473
4474 if (!no_callchain)
4475 symbol_conf.use_callchain = true;
4476 else
4477 symbol_conf.use_callchain = false;
4478
4479 #ifdef HAVE_LIBTRACEEVENT
4480 if (session->tevent.pevent &&
4481 tep_set_function_resolver(session->tevent.pevent,
4482 machine__resolve_kernel_addr,
4483 &session->machines.host) < 0) {
4484 pr_err("%s: failed to set libtraceevent function resolver\n", __func__);
4485 err = -1;
4486 goto out_delete;
4487 }
4488 #endif
4489 if (generate_script_lang) {
4490 struct stat perf_stat;
4491 int input;
4492 char *filename = strdup("perf-script");
4493
4494 if (output_set_by_user()) {
4495 fprintf(stderr,
4496 "custom fields not supported for generated scripts\n");
4497 err = -EINVAL;
4498 goto out_delete;
4499 }
4500
4501 input = open(data.path, O_RDONLY); /* input_name */
4502 if (input < 0) {
4503 err = -errno;
4504 perror("failed to open file");
4505 goto out_delete;
4506 }
4507
4508 err = fstat(input, &perf_stat);
4509 if (err < 0) {
4510 perror("failed to stat file");
4511 goto out_delete;
4512 }
4513
4514 if (!perf_stat.st_size) {
4515 fprintf(stderr, "zero-sized file, nothing to do!\n");
4516 goto out_delete;
4517 }
4518
4519 scripting_ops = script_spec__lookup(generate_script_lang);
4520 if (!scripting_ops && ends_with(generate_script_lang, ".py")) {
4521 scripting_ops = script_spec__lookup("python");
4522 free(filename);
4523 filename = strdup(generate_script_lang);
4524 filename[strlen(filename) - 3] = '\0';
4525 } else if (!scripting_ops && ends_with(generate_script_lang, ".pl")) {
4526 scripting_ops = script_spec__lookup("perl");
4527 free(filename);
4528 filename = strdup(generate_script_lang);
4529 filename[strlen(filename) - 3] = '\0';
4530 }
4531 if (!scripting_ops) {
4532 fprintf(stderr, "invalid language specifier '%s'\n", generate_script_lang);
4533 err = -ENOENT;
4534 goto out_delete;
4535 }
4536 if (!filename) {
4537 err = -ENOMEM;
4538 goto out_delete;
4539 }
4540 #ifdef HAVE_LIBTRACEEVENT
4541 err = scripting_ops->generate_script(session->tevent.pevent, filename);
4542 #else
4543 err = scripting_ops->generate_script(NULL, filename);
4544 #endif
4545 free(filename);
4546 goto out_delete;
4547 }
4548
4549 err = dlfilter__start(dlfilter, session);
4550 if (err)
4551 goto out_delete;
4552
4553 if (script_name) {
4554 err = scripting_ops->start_script(script_name, argc, argv, session);
4555 if (err)
4556 goto out_delete;
4557 pr_debug("perf script started with script %s\n\n", script_name);
4558 script_started = true;
4559 }
4560
4561
4562 err = perf_session__check_output_opt(session);
4563 if (err < 0)
4564 goto out_delete;
4565
4566 if (script.time_str) {
4567 err = perf_time__parse_for_ranges_reltime(script.time_str, session,
4568 &script.ptime_range,
4569 &script.range_size,
4570 &script.range_num,
4571 reltime);
4572 if (err < 0)
4573 goto out_delete;
4574
4575 itrace_synth_opts__set_time_range(&itrace_synth_opts,
4576 script.ptime_range,
4577 script.range_num);
4578 }
4579
4580 err = evswitch__init(&script.evswitch, session->evlist, stderr);
4581 if (err)
4582 goto out_delete;
4583
4584 if (zstd_init(&(session->zstd_data), 0) < 0)
4585 pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
4586
4587 err = __cmd_script(&script);
4588
4589 flush_scripting();
4590
4591 if (verbose > 2 || debug_kmaps)
4592 perf_session__dump_kmaps(session);
4593
4594 out_delete:
4595 if (script.ptime_range) {
4596 itrace_synth_opts__clear_time_range(&itrace_synth_opts);
4597 zfree(&script.ptime_range);
4598 }
4599
4600 zstd_fini(&(session->zstd_data));
4601 evlist__free_stats(session->evlist);
4602 perf_session__delete(session);
4603 perf_script__exit(&script);
4604
4605 if (script_started)
4606 cleanup_scripting();
4607 dlfilter__cleanup(dlfilter);
4608 free_dlarg();
4609 out:
4610 return err;
4611 }
4612