/*
 * builtin-trace.c
 *
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system wide, etc.  Default format is loosely strace like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 */

#include "util/record.h"
#include <api/fs/tracing_path.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#ifdef HAVE_BPF_SKEL
#include "bpf_skel/augmented_raw_syscalls.skel.h"
#endif
#endif
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/config.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/env.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/synthetic-events.h"
#include "util/evlist.h"
#include "util/evswitch.h"
#include "util/mmap.h"
#include <subcmd/pager.h>
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/tool.h"
#include "util/util.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/tracepoint.h"
#include "callchain.h"
#include "print_binary.h"
#include "string2.h"
#include "syscalltbl.h"
#include "rb_resort.h"
#include "../perf.h"

#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/list_sort.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <fcntl.h>
#include <sys/sysmacros.h>

#include <linux/ctype.h>
#include <perf/mmap.h>

#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif

#ifndef O_CLOEXEC
# define O_CLOEXEC		02000000
#endif

#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE	1024
#endif

#define RAW_SYSCALL_ARGS_NUM	6
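/*
 * Note: RAW_SYSCALL_ARGS_NUM matches the (at most) six integer arguments
 * carried by raw_syscalls:sys_enter; it sizes the per-syscall .arg[]
 * format slots below.
 */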
/*
 * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
 */
struct syscall_arg_fmt {
	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	bool	   (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
	void	   *parm;
	const char *name;
	u16	   nr_entries; // for arrays
	bool	   show_zero;
#ifdef HAVE_LIBBPF_SUPPORT
	const struct btf_type *type;
#endif
};

struct syscall_fmt {
	const char *name;
	const char *alias;
	struct {
		const char *sys_enter,
			   *sys_exit;
	}	   bpf_prog_name;
	struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
	u8	   nr_args;
	bool	   errpid;
	bool	   timeout;
	bool	   hexret;
};

struct trace {
	struct perf_tool	tool;
	struct syscalltbl	*sctbl;
	struct {
		struct syscall  *table;
		struct {
			struct evsel *sys_enter,
				     *sys_exit,
				     *bpf_output;
		}		events;
	} syscalls;
#ifdef HAVE_BPF_SKEL
	struct augmented_raw_syscalls_bpf *skel;
#endif
#ifdef HAVE_LIBBPF_SUPPORT
	struct btf		*btf;
#endif
	struct record_opts	opts;
	struct evlist		*evlist;
	struct machine		*host;
	struct thread		*current;
	struct cgroup		*cgroup;
	u64			base_time;
	FILE			*output;
	unsigned long		nr_events;
	unsigned long		nr_events_printed;
	unsigned long		max_events;
	struct evswitch		evswitch;
	struct strlist		*ev_qualifier;
	struct {
		size_t		nr;
		int		*entries;
	}			ev_qualifier_ids;
	struct {
		size_t		nr;
		pid_t		*entries;
		struct bpf_map  *map;
	}			filter_pids;
	double			duration_filter;
	double			runtime_ms;
	struct {
		u64		vfs_getname,
				proc_getname;
	} stats;
	unsigned int		max_stack;
	unsigned int		min_stack;
	int			raw_augmented_syscalls_args_size;
	bool			raw_augmented_syscalls;
	bool			fd_path_disabled;
	bool			sort_events;
	bool			not_ev_qualifier;
	bool			live;
	bool			full_time;
	bool			sched;
	bool			multiple_threads;
	bool			summary;
	bool			summary_only;
	bool			errno_summary;
	bool			failure_only;
	bool			show_comm;
	bool			print_sample;
	bool			show_tool_stats;
	bool			trace_syscalls;
	bool			libtraceevent_print;
	bool			kernel_syscallchains;
	s16			args_alignment;
	bool			show_tstamp;
	bool			show_duration;
	bool			show_zeros;
	bool			show_arg_names;
	bool			show_string_prefix;
	bool			force;
	bool			vfs_getname;
	int			trace_pgfaults;
	char			*perfconfig_events;
	struct {
		struct ordered_events	data;
		u64			last;
	} oe;
};

static void trace__load_vmlinux_btf(struct trace *trace __maybe_unused)
{
#ifdef HAVE_LIBBPF_SUPPORT
	if (trace->btf != NULL)
		return;

	trace->btf = btf__load_vmlinux_btf();
	if (verbose > 0) {
		fprintf(trace->output, trace->btf ? "vmlinux BTF loaded\n" :
						    "Failed to load vmlinux BTF\n");
	}
#endif
}

struct tp_field {
	int offset;
	union {
		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
	};
};

#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value;  \
}

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);

#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);

static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
{
	field->offset = offset;

	switch (size) {
	case 1:
		field->integer = tp_field__u8;
		break;
	case 2:
		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
		break;
	case 4:
		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
		break;
	case 8:
		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
		break;
	default:
		return -1;
	}

	return 0;
}

static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
{
	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
}

static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
	return sample->raw_data + field->offset;
}

static int __tp_field__init_ptr(struct tp_field *field, int offset)
{
	field->offset = offset;
	field->pointer = tp_field__ptr;
	return 0;
}

static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
{
	return __tp_field__init_ptr(field, format_field->offset);
}

struct syscall_tp {
	struct tp_field id;
	union {
		struct tp_field args, ret;
	};
};

/*
 * The evsel->priv as used by 'perf trace'
 * sc: for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME
 * fmt: for all the other tracepoints
 */
struct evsel_trace {
	struct syscall_tp	sc;
	struct syscall_arg_fmt	*fmt;
};

static struct evsel_trace *evsel_trace__new(void)
{
	return zalloc(sizeof(struct evsel_trace));
}

static void evsel_trace__delete(struct evsel_trace *et)
{
	if (et == NULL)
		return;

	zfree(&et->fmt);
	free(et);
}

/*
 * Used with raw_syscalls:sys_{enter,exit} and with the
 * syscalls:sys_{enter,exit}_SYSCALL tracepoints
 */
static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	return &et->sc;
}

static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel)
{
	if (evsel->priv == NULL) {
		evsel->priv = evsel_trace__new();
		if (evsel->priv == NULL)
			return NULL;
	}

	return __evsel__syscall_tp(evsel);
}
/*
 * Used with all the other tracepoints.
 */
static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	return et->fmt;
}

static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	if (evsel->priv == NULL) {
		et = evsel->priv = evsel_trace__new();

		if (et == NULL)
			return NULL;
	}

	if (et->fmt == NULL) {
		et->fmt = calloc(evsel->tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
		if (et->fmt == NULL)
			goto out_delete;
	}

	return __evsel__syscall_arg_fmt(evsel);

out_delete:
	evsel_trace__delete(evsel->priv);
	evsel->priv = NULL;
	return NULL;
}

static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
	struct tep_format_field *format_field = evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_uint(field, format_field, evsel->needs_swap);
}

#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
	   evsel__init_tp_uint_field(evsel, &sc->name, #name); })

static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
	struct tep_format_field *format_field = evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
	   evsel__init_tp_ptr_field(evsel, &sc->name, #name); })

static void evsel__delete_priv(struct evsel *evsel)
{
	zfree(&evsel->priv);
	evsel__delete(evsel);
}

static int evsel__init_syscall_tp(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel__syscall_tp(evsel);

	if (sc != NULL) {
		if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
		    evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
			return -ENOENT;

		return 0;
	}

	return -ENOMEM;
}

static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
	struct syscall_tp *sc = evsel__syscall_tp(evsel);

	if (sc != NULL) {
		struct tep_format_field *syscall_id = evsel__field(tp, "id");
		if (syscall_id == NULL)
			syscall_id = evsel__field(tp, "__syscall_nr");
		if (syscall_id == NULL ||
		    __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
			return -EINVAL;

		return 0;
	}

	return -ENOMEM;
}

static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
	struct syscall_tp *sc = __evsel__syscall_tp(evsel);

	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}

static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
	struct syscall_tp *sc = __evsel__syscall_tp(evsel);

	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}

static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
	if (evsel__syscall_tp(evsel) != NULL) {
		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
			return -ENOENT;

		evsel->handler = handler;
		return 0;
	}

	return -ENOMEM;
}
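/*
 * Callers further down in this file do, e.g.:
 *
 *	perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
 *
 * i.e. the handler lands in evsel->handler via evsel__init_raw_syscall_tp().
 */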
static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
	struct evsel *evsel = evsel__newtp("raw_syscalls", direction);

	/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (evsel__init_raw_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	evsel__delete_priv(evsel);
	return NULL;
}

#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.pointer(&fields->name, sample); })

size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_suffix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
}

size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_prefix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}

static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
						const char *intfmt,
						struct syscall_arg *arg)
{
	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
}

static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray

bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarray__strtoul(arg->parm, bf, size, ret);
}

bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarray__strtoul_flags(arg->parm, bf, size, ret);
}

bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarrays__strtoul(arg->parm, bf, size, ret);
}

size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
{
	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
}
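/*
 * Example: strarray__scnprintf() over the itimers strarray (defined below)
 * turns val = 2 into "PROF" ("ITIMER_PROF" with show_prefix); out-of-range
 * values fall back to the integer plus an "ITIMER_???" hint.
 */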
size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	size_t printed;
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];
		int idx = val - sa->offset;

		if (idx >= 0 && idx < sa->nr_entries) {
			if (sa->entries[idx] == NULL)
				break;
			return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
		}
	}

	printed = scnprintf(bf, size, intfmt, val);
	if (show_prefix)
		printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
	return printed;
}

bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret)
{
	int i;

	for (i = 0; i < sa->nr_entries; ++i) {
		if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') {
			*ret = sa->offset + i;
			return true;
		}
	}

	return false;
}

bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret)
{
	u64 val = 0;
	char *tok = bf, *sep, *end;

	*ret = 0;

	while (size != 0) {
		int toklen = size;

		sep = memchr(tok, '|', size);
		if (sep != NULL) {
			size -= sep - tok + 1;

			end = sep - 1;
			while (end > tok && isspace(*end))
				--end;

			toklen = end - tok + 1;
		}

		while (isspace(*tok))
			++tok;

		if (isalpha(*tok) || *tok == '_') {
			if (!strarray__strtoul(sa, tok, toklen, &val))
				return false;
		} else
			val = strtoul(tok, NULL, 0);

		*ret |= (1 << (val - 1));

		if (sep == NULL)
			break;
		tok = sep + 1;
	}

	return true;
}

bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret)
{
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];

		if (strarray__strtoul(sa, bf, size, ret))
			return true;
	}

	return false;
}
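/*
 * Example: strarray__strtoul_flags() parsing "CLOEXEC" against the
 * fsmount_flags strarray (defined below) resolves the token to value 1,
 * which is folded into the mask as (1 << 0), i.e. FSMOUNT_CLOEXEC.
 */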
size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
					struct syscall_arg *arg)
{
	return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
}

#ifndef AT_FDCWD
#define AT_FDCWD	-100
#endif

static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
					   struct syscall_arg *arg)
{
	int fd = arg->val;
	const char *prefix = "AT_FD";

	if (fd == AT_FDCWD)
		return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");

	return syscall_arg__scnprintf_fd(bf, size, arg);
}

#define SCA_FDAT syscall_arg__scnprintf_fd_at

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd

size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%#lx", arg->val);
}

size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
{
	if (arg->val == 0)
		return scnprintf(bf, size, "NULL");
	return syscall_arg__scnprintf_hex(bf, size, arg);
}

size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%d", arg->val);
}

size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%ld", arg->val);
}

static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg)
{
	// XXX Hey, maybe for sched:sched_switch prev/next comm fields we can
	//     fill missing comms using thread__set_comm()...
	//     here or in a special syscall_arg__scnprintf_pid_sched_tp...
	return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val);
}

#define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array

static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD", "OBJ_PIN", "OBJ_GET", "PROG_ATTACH",
	"PROG_DETACH", "PROG_TEST_RUN", "PROG_GET_NEXT_ID", "MAP_GET_NEXT_ID",
	"PROG_GET_FD_BY_ID", "MAP_GET_FD_BY_ID", "OBJ_GET_INFO_BY_FD",
	"PROG_QUERY", "RAW_TRACEPOINT_OPEN", "BTF_LOAD", "BTF_GET_FD_BY_ID",
	"TASK_FD_QUERY", "MAP_LOOKUP_AND_DELETE_ELEM", "MAP_FREEZE",
	"BTF_GET_NEXT_ID", "MAP_LOOKUP_BATCH", "MAP_LOOKUP_AND_DELETE_BATCH",
	"MAP_UPDATE_BATCH", "MAP_DELETE_BATCH", "LINK_CREATE", "LINK_UPDATE",
	"LINK_GET_FD_BY_ID", "LINK_GET_NEXT_ID", "ENABLE_STATS", "ITER_CREATE",
	"LINK_DETACH", "PROG_BIND_MAP",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");

static const char *fsmount_flags[] = {
	[1] = "CLOEXEC",
};
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");

#include "trace/beauty/generated/fsconfig_arrays.c"

static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");

static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers, "ITIMER_");

static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");

static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences, "SEEK_");

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
	"GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds, "F_");

static const char *fcntl_linux_specific_cmds[] = {
	"SETLEASE", "GETLEASE", "NOTIFY", "DUPFD_QUERY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};

static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);

static struct strarray *fcntl_cmds_arrays[] = {
	&strarray__fcntl_cmds,
	&strarray__fcntl_linux_specific_cmds,
};

static DEFINE_STRARRAYS(fcntl_cmds_arrays);

static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");

static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow, "SIG_");

static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid, "CLOCK_");

static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *suffix = "_OK";
	size_t printed = 0;
	int mode = arg->val;

	if (mode == F_OK) /* 0 */
		return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
#define	P_MODE(n) \
	if (mode & n##_OK) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
		mode &= ~n##_OK; \
	}

	P_MODE(R);
	P_MODE(W);
	P_MODE(X);
#undef P_MODE

	if (mode)
		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);

	return printed;
}

#define SCA_ACCMODE syscall_arg__scnprintf_access_mode

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_FILENAME syscall_arg__scnprintf_filename

static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "O_";
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags

#ifndef GRND_NONBLOCK
#define GRND_NONBLOCK	0x0001
#endif
#ifndef GRND_RANDOM
#define GRND_RANDOM	0x0002
#endif

static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
						     struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "GRND_";
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & GRND_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~GRND_##n; \
	}

	P_FLAG(RANDOM);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags

#ifdef HAVE_LIBBPF_SUPPORT
static void syscall_arg_fmt__cache_btf_enum(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type)
{
	int id;

	type = strstr(type, "enum ");
	if (type == NULL)
		return;

	type += 5; // skip "enum " to get the enumeration name

	id = btf__find_by_name(btf, type);
	if (id < 0)
		return;

	arg_fmt->type = btf__type_by_id(btf, id);
}

static bool syscall_arg__strtoul_btf_enum(char *bf, size_t size, struct syscall_arg *arg, u64 *val)
{
	const struct btf_type *bt = arg->fmt->type;
	struct btf *btf = arg->trace->btf;
	struct btf_enum *be = btf_enum(bt);

	for (int i = 0; i < btf_vlen(bt); ++i, ++be) {
		const char *name = btf__name_by_offset(btf, be->name_off);
		int max_len = max(size, strlen(name));

		if (strncmp(name, bf, max_len) == 0) {
			*val = be->val;
			return true;
		}
	}

	return false;
}

static bool syscall_arg__strtoul_btf_type(char *bf, size_t size, struct syscall_arg *arg, u64 *val)
{
	const struct btf_type *bt;
	char *type = arg->type_name;
	struct btf *btf;

	trace__load_vmlinux_btf(arg->trace);

	btf = arg->trace->btf;
	if (btf == NULL)
		return false;

	if (arg->fmt->type == NULL) {
		// See if this is an enum
		syscall_arg_fmt__cache_btf_enum(arg->fmt, btf, type);
	}

	// Now let's see if we have a BTF type resolved
	bt = arg->fmt->type;
	if (bt == NULL)
		return false;

	// If it is an enum:
	if (btf_is_enum(arg->fmt->type))
		return syscall_arg__strtoul_btf_enum(bf, size, arg, val);

	return false;
}

static size_t btf_enum_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, int val)
{
	struct btf_enum *be = btf_enum(type);
	const int nr_entries = btf_vlen(type);

	for (int i = 0; i < nr_entries; ++i, ++be) {
		if (be->val == val) {
			return scnprintf(bf, size, "%s",
					 btf__name_by_offset(btf, be->name_off));
		}
	}

	return 0;
}
static size_t trace__btf_scnprintf(struct trace *trace, struct syscall_arg_fmt *arg_fmt, char *bf,
				   size_t size, int val, char *type)
{
	if (trace->btf == NULL)
		return 0;

	if (arg_fmt->type == NULL) {
		// Check if this is an enum and if we have the BTF type for it.
		syscall_arg_fmt__cache_btf_enum(arg_fmt, trace->btf, type);
	}

	// Did we manage to find a BTF type for the syscall/tracepoint argument?
	if (arg_fmt->type == NULL)
		return 0;

	if (btf_is_enum(arg_fmt->type))
		return btf_enum_scnprintf(arg_fmt->type, trace->btf, bf, size, val);

	return 0;
}

#else // HAVE_LIBBPF_SUPPORT
static size_t trace__btf_scnprintf(struct trace *trace __maybe_unused, struct syscall_arg_fmt *arg_fmt __maybe_unused,
				   char *bf __maybe_unused, size_t size __maybe_unused, int val __maybe_unused,
				   char *type __maybe_unused)
{
	return 0;
}

static bool syscall_arg__strtoul_btf_type(char *bf __maybe_unused, size_t size __maybe_unused,
					  struct syscall_arg *arg __maybe_unused, u64 *val __maybe_unused)
{
	return false;
}
#endif // HAVE_LIBBPF_SUPPORT

#define STUL_BTF_TYPE syscall_arg__strtoul_btf_type

#define STRARRAY(name, array) \
	  { .scnprintf	= SCA_STRARRAY, \
	    .strtoul	= STUL_STRARRAY, \
	    .parm	= &strarray__##array, }

#define STRARRAY_FLAGS(name, array) \
	  { .scnprintf	= SCA_STRARRAY_FLAGS, \
	    .strtoul	= STUL_STRARRAY_FLAGS, \
	    .parm	= &strarray__##array, }

#include "trace/beauty/arch_errno_names.c"
#include "trace/beauty/eventfd.c"
#include "trace/beauty/futex_op.c"
#include "trace/beauty/futex_val3.c"
#include "trace/beauty/mmap.c"
#include "trace/beauty/mode_t.c"
#include "trace/beauty/msg_flags.c"
#include "trace/beauty/open_flags.c"
#include "trace/beauty/perf_event_open.c"
#include "trace/beauty/pid.c"
#include "trace/beauty/sched_policy.c"
#include "trace/beauty/seccomp.c"
#include "trace/beauty/signum.c"
#include "trace/beauty/socket_type.c"
#include "trace/beauty/waitid_options.c"

static const struct syscall_fmt syscall_fmts[] = {
	{ .name	    = "access",
	  .arg = { [1] = { .scnprintf = SCA_ACCMODE,	/* mode */ }, }, },
	{ .name	    = "arch_prctl",
	  .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
		   [1] = { .scnprintf = SCA_PTR,	/* arg2 */ }, }, },
	{ .name	    = "bind",
	  .arg = { [0] = { .scnprintf = SCA_INT,	/* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR,	/* umyaddr */ },
		   [2] = { .scnprintf = SCA_INT,	/* addrlen */ }, }, },
	{ .name	    = "bpf",
	  .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
	{ .name	    = "brk",	    .hexret = true,
	  .arg = { [0] = { .scnprintf = SCA_PTR,	/* brk */ }, }, },
	{ .name	    = "clock_gettime",
	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
	{ .name	    = "clock_nanosleep",
	  .arg = { [2] = { .scnprintf = SCA_TIMESPEC,	/* rqtp */ }, }, },
	{ .name	    = "clone",	    .errpid = true, .nr_args = 5,
	  .arg = { [0] = { .name = "flags",	    .scnprintf = SCA_CLONE_FLAGS, },
		   [1] = { .name = "child_stack",   .scnprintf = SCA_HEX, },
		   [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
		   [3] = { .name = "child_tidptr",  .scnprintf = SCA_HEX, },
		   [4] = { .name = "tls",	    .scnprintf = SCA_HEX, }, }, },
	{ .name	    = "close",
	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD,	/* fd */ }, }, },
	{ .name	    = "connect",
	  .arg = { [0] = { .scnprintf = SCA_INT,	/* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR,	/* servaddr */ },
		   [2] = { .scnprintf = SCA_INT,	/* addrlen */ }, }, },
	{ .name	    = "epoll_ctl",
	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
	{ .name	    = "eventfd2",
	  .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS,	/* flags */ }, }, },
	{ .name	    = "faccessat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dirfd */ },
		   [1] = { .scnprintf = SCA_FILENAME,	/* pathname */ },
		   [2] = { .scnprintf = SCA_ACCMODE,	/* mode */ }, }, },
	{ .name	    = "faccessat2",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dirfd */ },
		   [1] = { .scnprintf = SCA_FILENAME,	/* pathname */ },
		   [2] = { .scnprintf = SCA_ACCMODE,	/* mode */ },
		   [3] = { .scnprintf = SCA_FACCESSAT2_FLAGS, /* flags */ }, }, },
	{ .name	    = "fchmodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* fd */ }, }, },
	{ .name	    = "fchownat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* fd */ }, }, },
	{ .name	    = "fcntl",
	  .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD,	/* cmd */
			   .strtoul   = STUL_STRARRAYS,
			   .parm      = &strarrays__fcntl_cmds_arrays,
			   .show_zero = true, },
		   [2] = { .scnprintf = SCA_FCNTL_ARG,	/* arg */ }, }, },
	{ .name	    = "flock",
	  .arg = { [1] = { .scnprintf = SCA_FLOCK,	/* cmd */ }, }, },
	{ .name	    = "fsconfig",
	  .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
	{ .name	    = "fsmount",
	  .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
		   [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
	{ .name	    = "fspick",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME,	/* path */ },
		   [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
	{ .name	    = "fstat", .alias = "newfstat", },
	{ .name	    = "futex",
	  .arg = { [1] = { .scnprintf = SCA_FUTEX_OP,	/* op */ },
		   [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
	{ .name	    = "futimesat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* fd */ }, }, },
	{ .name	    = "getitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "getpid",	    .errpid = true, },
	{ .name	    = "getpgid",    .errpid = true, },
	{ .name	    = "getppid",    .errpid = true, },
	{ .name	    = "getrandom",
	  .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
	{ .name	    = "getrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "getsockopt",
	  .arg = { [1] = STRARRAY(level, socket_level), }, },
	{ .name	    = "gettid",	    .errpid = true, },
	{ .name	    = "ioctl",
	  .arg = {
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches.
 */
		   [1] = { .scnprintf = SCA_IOCTL_CMD,	/* cmd */ },
		   [2] = { .scnprintf = SCA_HEX,	/* arg */ }, }, },
#else
		   [2] = { .scnprintf = SCA_HEX,	/* arg */ }, }, },
#endif
	{ .name	    = "kcmp",	    .nr_args = 5,
	  .arg = { [0] = { .name = "pid1",	.scnprintf = SCA_PID, },
		   [1] = { .name = "pid2",	.scnprintf = SCA_PID, },
		   [2] = { .name = "type",	.scnprintf = SCA_KCMP_TYPE, },
		   [3] = { .name = "idx1",	.scnprintf = SCA_KCMP_IDX, },
		   [4] = { .name = "idx2",	.scnprintf = SCA_KCMP_IDX, }, }, },
	{ .name	    = "keyctl",
	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
	{ .name	    = "kill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM,	/* sig */ }, }, },
	{ .name	    = "linkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* fd */ }, }, },
	{ .name	    = "lseek",
	  .arg = { [2] = STRARRAY(whence, whences), }, },
	{ .name	    = "lstat", .alias = "newlstat", },
	{ .name	    = "madvise",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MADV_BHV,	/* behavior */ }, }, },
	{ .name	    = "mkdirat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* fd */ }, }, },
	{ .name	    = "mknodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* fd */ }, }, },
	{ .name	    = "mmap",	    .hexret = true,
/* The standard mmap maps to old_mmap on s390x */
#if defined(__s390x__)
	.alias = "old_mmap",
#endif
	  .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ },
		   [3] = { .scnprintf = SCA_MMAP_FLAGS,	/* flags */
			   .strtoul   = STUL_STRARRAY_FLAGS,
			   .parm      = &strarray__mmap_flags, },
		   [5] = { .scnprintf = SCA_HEX,	/* offset */ }, }, },
	{ .name	    = "mount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME,	/* dev_name */ },
		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
			   .mask_val  = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "move_mount",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* from_dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME,	/* from_pathname */ },
		   [2] = { .scnprintf = SCA_FDAT,	/* to_dfd */ },
		   [3] = { .scnprintf = SCA_FILENAME,	/* to_pathname */ },
		   [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ }, }, },
	{ .name	    = "mq_unlink",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME,	/* u_name */ }, }, },
	{ .name	    = "mremap",	    .hexret = true,
	  .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
	{ .name	    = "name_to_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ }, }, },
	{ .name	    = "nanosleep",
	  .arg = { [0] = { .scnprintf = SCA_TIMESPEC,	/* req */ }, }, },
	{ .name	    = "newfstatat", .alias = "fstatat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dirfd */ },
		   [1] = { .scnprintf = SCA_FILENAME,	/* pathname */ },
		   [3] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, },
	{ .name	    = "open",
	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "open_by_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "openat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "perf_event_open",
	  .arg = { [0] = { .scnprintf = SCA_PERF_ATTR,	/* attr */ },
		   [2] = { .scnprintf = SCA_INT,	/* cpu */ },
		   [3] = { .scnprintf = SCA_FD,		/* group_fd */ },
		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
	{ .name	    = "pipe2",
	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
	{ .name	    = "pkey_alloc",
	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS, /* access_rights */ }, }, },
	{ .name	    = "pkey_free",
	  .arg = { [0] = { .scnprintf = SCA_INT,	/* key */ }, }, },
	{ .name	    = "pkey_mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ },
		   [3] = { .scnprintf = SCA_INT,	/* pkey */ }, }, },
	{ .name	    = "poll", .timeout = true, },
	{ .name	    = "ppoll", .timeout = true, },
	{ .name	    = "prctl",
	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */
			   .strtoul   = STUL_STRARRAY,
			   .parm      = &strarray__prctl_options, },
		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
	{ .name	    = "pread", .alias = "pread64", },
	{ .name	    = "preadv", .alias = "pread", },
	{ .name	    = "prlimit64",
	  .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "pwrite", .alias = "pwrite64", },
	{ .name	    = "readlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ }, }, },
	{ .name	    = "recvfrom",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS,	/* flags */ }, }, },
	{ .name	    = "recvmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS,	/* flags */ }, }, },
	{ .name	    = "recvmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS,	/* flags */ }, }, },
	{ .name	    = "renameat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT,	/* newdirfd */ }, }, },
	{ .name	    = "renameat2",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT,	/* newdirfd */ },
		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
	{ .name	    = "rt_sigaction",
	  .arg = { [0] = { .scnprintf = SCA_SIGNUM,	/* sig */ }, }, },
	{ .name	    = "rt_sigprocmask",
	  .arg = { [0] = STRARRAY(how, sighow), }, },
	{ .name	    = "rt_sigqueueinfo",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM,	/* sig */ }, }, },
	{ .name	    = "rt_tgsigqueueinfo",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM,	/* sig */ }, }, },
	{ .name	    = "sched_setscheduler",
	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
	{ .name	    = "seccomp",
	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP,	   /* op */ },
		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
	{ .name	    = "select", .timeout = true, },
	{ .name	    = "sendfile", .alias = "sendfile64", },
	{ .name	    = "sendmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS,	/* flags */ }, }, },
	{ .name	    = "sendmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS,	/* flags */ }, }, },
	{ .name	    = "sendto",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS,	/* flags */ },
		   [4] = { .scnprintf = SCA_SOCKADDR,	/* addr */ }, }, },
	{ .name	    = "set_tid_address", .errpid = true, },
	{ .name	    = "setitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "setrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "setsockopt",
	  .arg = { [1] = STRARRAY(level, socket_level), }, },
	{ .name	    = "socket",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE,	/* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO,	/* protocol */ }, }, },
	{ .name	    = "socketpair",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE,	/* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO,	/* protocol */ }, }, },
	{ .name	    = "stat", .alias = "newstat", },
	{ .name	    = "statx",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* fdat */ },
		   [2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ },
		   [3] = { .scnprintf = SCA_STATX_MASK,	/* mask */ }, }, },
	{ .name	    = "swapoff",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME,	/* specialfile */ }, }, },
	{ .name	    = "swapon",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME,	/* specialfile */ }, }, },
	{ .name	    = "symlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ }, }, },
	{ .name	    = "sync_file_range",
	  .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
	{ .name	    = "tgkill",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM,	/* sig */ }, }, },
	{ .name	    = "tkill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM,	/* sig */ }, }, },
	{ .name	    = "umount2", .alias = "umount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME,	/* name */ }, }, },
	{ .name	    = "uname", .alias = "newuname", },
	{ .name	    = "unlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME,	/* pathname */ },
		   [2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, },
	{ .name	    = "utimensat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dirfd */ }, }, },
	{ .name	    = "wait4",	    .errpid = true,
	  .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
	{ .name	    = "waitid",	    .errpid = true,
	  .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
};

static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static const struct syscall_fmt *__syscall_fmt__find(const struct syscall_fmt *fmts,
						     const int nmemb,
						     const char *name)
{
	return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

static const struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return __syscall_fmt__find(syscall_fmts, nmemb, name);
}

static const struct syscall_fmt *__syscall_fmt__find_by_alias(const struct syscall_fmt *fmts,
							      const int nmemb, const char *alias)
{
	int i;

	for (i = 0; i < nmemb; ++i) {
		if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0)
			return &fmts[i];
	}

	return NULL;
}

static const struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias);
}
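/*
 * syscall_fmt__find() bsearch()es syscall_fmts[], so entries must be kept
 * sorted by name; aliases (e.g. "old_mmap" -> "mmap" on s390x) are instead
 * resolved by the linear __syscall_fmt__find_by_alias() walk above.
 */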
/*
 * is_exit: is this "exit" or "exit_group"?
 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
 * nonexistent: Just a hole in the syscall table, syscall id not allocated
 */
struct syscall {
	struct tep_event    *tp_format;
	int		    nr_args;
	int		    args_size;
	struct {
		struct bpf_program *sys_enter,
				   *sys_exit;
	}		    bpf_prog;
	bool		    is_exit;
	bool		    is_open;
	bool		    nonexistent;
	bool		    use_btf;
	struct tep_format_field *args;
	const char	    *name;
	const struct syscall_fmt  *fmt;
	struct syscall_arg_fmt *arg_fmt;
};

/*
 * We need to have this 'calculated' boolean because in some cases we really
 * don't know what is the duration of a syscall, for instance, when we start
 * a session and some threads are waiting for a syscall to finish, say 'poll',
 * in which case all we can do is to print "( ? )" for the duration and for
 * the start timestamp.
 */
static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (!calculated)
		printed += fprintf(fp, "         ");
	else if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}
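/*
 * Example: a 2.5ms syscall has its duration printed in red, 0.020ms in
 * yellow and anything under 0.01ms in the normal color; !calculated
 * prints only padding, keeping the columns aligned.
 */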
/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
 * ret_scnprintf: syscall args may set this to a different syscall return
 *                formatter, for instance, fcntl may return fds, file flags, etc.
 */
struct thread_trace {
	u64		  entry_time;
	bool		  entry_pending;
	unsigned long	  nr_events;
	unsigned long	  pfmaj, pfmin;
	char		  *entry_str;
	double		  runtime_ms;
	size_t		  (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	struct {
		unsigned long ptr;
		short int     entry_str_pos;
		bool	      pending_open;
		unsigned int  namelen;
		char	      *name;
	} filename;
	struct {
		int	      max;
		struct file   *table;
	} files;

	struct intlist *syscall_stats;
};

static struct thread_trace *thread_trace__new(void)
{
	struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));

	if (ttrace) {
		ttrace->files.max = -1;
		ttrace->syscall_stats = intlist__new(NULL);
	}

	return ttrace;
}

static void thread_trace__free_files(struct thread_trace *ttrace);

static void thread_trace__delete(void *pttrace)
{
	struct thread_trace *ttrace = pttrace;

	if (!ttrace)
		return;

	intlist__delete(ttrace->syscall_stats);
	ttrace->syscall_stats = NULL;
	thread_trace__free_files(ttrace);
	zfree(&ttrace->entry_str);
	free(ttrace);
}

static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	if (thread__priv(thread) == NULL)
		thread__set_priv(thread, thread_trace__new());

	if (thread__priv(thread) == NULL)
		goto fail;

	ttrace = thread__priv(thread);
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(fp, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}

void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
{
	struct thread_trace *ttrace = thread__priv(arg->thread);

	ttrace->ret_scnprintf = ret_scnprintf;
}

#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

static const size_t trace__entry_str_size = 2048;

static void thread_trace__free_files(struct thread_trace *ttrace)
{
	for (int i = 0; i < ttrace->files.max; ++i) {
		struct file *file = ttrace->files.table + i;
		zfree(&file->pathname);
	}

	zfree(&ttrace->files.table);
	ttrace->files.max = -1;
}

static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
	if (fd < 0)
		return NULL;

	if (fd > ttrace->files.max) {
		struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));

		if (nfiles == NULL)
			return NULL;

		if (ttrace->files.max != -1) {
			memset(nfiles + ttrace->files.max + 1, 0,
			       (fd - ttrace->files.max) * sizeof(struct file));
		} else {
			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
		}

		ttrace->files.table = nfiles;
		ttrace->files.max   = fd;
	}

	return ttrace->files.table + fd;
}

struct file *thread__files_entry(struct thread *thread, int fd)
{
	return thread_trace__files_entry(thread__priv(thread), fd);
}
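/*
 * The per-thread fd table above grows on demand: looking up, say, fd 42 on
 * a fresh thread reallocates the table to 43 slots and zeroes the new ones.
 */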
static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
{
	struct thread_trace *ttrace = thread__priv(thread);
	struct file *file = thread_trace__files_entry(ttrace, fd);

	if (file != NULL) {
		struct stat st;
		if (stat(pathname, &st) == 0)
			file->dev_maj = major(st.st_rdev);
		file->pathname = strdup(pathname);
		if (file->pathname)
			return 0;
	}

	return -1;
}

static int thread__read_fd_path(struct thread *thread, int fd)
{
	char linkname[PATH_MAX], pathname[PATH_MAX];
	struct stat st;
	int ret;

	if (thread__pid(thread) == thread__tid(thread)) {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/fd/%d", thread__pid(thread), fd);
	} else {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/task/%d/fd/%d",
			  thread__pid(thread), thread__tid(thread), fd);
	}

	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
		return -1;

	ret = readlink(linkname, pathname, sizeof(pathname));

	if (ret < 0 || ret > st.st_size)
		return -1;

	pathname[ret] = '\0';
	return trace__set_fd_pathname(thread, fd, pathname);
}

static const char *thread__fd_path(struct thread *thread, int fd,
				   struct trace *trace)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (ttrace == NULL || trace->fd_path_disabled)
		return NULL;

	if (fd < 0)
		return NULL;

	if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
		if (!trace->live)
			return NULL;
		++trace->stats.proc_getname;
		if (thread__read_fd_path(thread, fd))
			return NULL;
	}

	return ttrace->files.table[fd].pathname;
}

size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = scnprintf(bf, size, "%d", fd);
	const char *path = thread__fd_path(arg->thread, fd, arg->trace);

	if (path)
		printed += scnprintf(bf + printed, size - printed, "<%s>", path);

	return printed;
}

size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
{
	size_t printed = scnprintf(bf, size, "%d", fd);
	struct thread *thread = machine__find_thread(trace->host, pid, pid);

	if (thread) {
		const char *path = thread__fd_path(thread, fd, trace);

		if (path)
			printed += scnprintf(bf + printed, size - printed, "<%s>", path);

		thread__put(thread);
	}

	return printed;
}

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
	struct thread_trace *ttrace = thread__priv(arg->thread);

	if (ttrace && fd >= 0 && fd <= ttrace->files.max)
		zfree(&ttrace->files.table[fd].pathname);

	return printed;
}

static void thread__set_filename_pos(struct thread *thread, const char *bf,
				     unsigned long ptr)
{
	struct thread_trace *ttrace = thread__priv(thread);

	ttrace->filename.ptr = ptr;
	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
}
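/*
 * Net effect: in a live session an fd argument resolved via /proc/PID/fd
 * (or /proc/PID/task/TID/fd for non-leader threads) prints as e.g.
 * "3</etc/passwd>"; closing the fd (SCA_CLOSE_FD above) drops the cached
 * pathname so a reused fd number isn't mislabeled.
 */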
static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
	struct augmented_arg *augmented_arg = arg->augmented.args;
	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
	/*
	 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
	 * we would have two strings, each prefixed by its size.
	 */
	int consumed = sizeof(*augmented_arg) + augmented_arg->size;

	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	return printed;
}

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	unsigned long ptr = arg->val;

	if (arg->augmented.args)
		return syscall_arg__scnprintf_augmented_string(arg, bf, size);

	if (!arg->trace->vfs_getname)
		return scnprintf(bf, size, "%#x", ptr);

	thread__set_filename_pos(arg->thread, bf, ptr);
	return 0;
}

static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;

	return fprintf(fp, "%10.3f ", ts);
}

/*
 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
 * using ttrace->entry_time for a thread that receives a sys_exit without
 * first having received a sys_enter ("poll" issued before tracing session
 * starts, lost sys_enter exit due to ring buffer overflow).
 */
static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	if (tstamp > 0)
		return __trace__fprintf_tstamp(trace, tstamp, fp);

	return fprintf(fp, "         ? ");
}

static pid_t workload_pid = -1;
static volatile sig_atomic_t done = false;
static volatile sig_atomic_t interrupted = false;

static void sighandler_interrupt(int sig __maybe_unused)
{
	done = interrupted = true;
}

static void sighandler_chld(int sig __maybe_unused, siginfo_t *info,
			    void *context __maybe_unused)
{
	if (info->si_pid == workload_pid)
		done = true;
}

static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
{
	size_t printed = 0;

	if (trace->multiple_threads) {
		if (trace->show_comm)
			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
		printed += fprintf(fp, "%d ", thread__tid(thread));
	}

	return printed;
}

static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
{
	size_t printed = 0;

	if (trace->show_tstamp)
		printed = trace__fprintf_tstamp(trace, tstamp, fp);
	if (trace->show_duration)
		printed += fprintf_duration(duration, duration_calculated, fp);
	return printed + trace__fprintf_comm_tid(trace, thread, fp);
}

static int trace__process_event(struct trace *trace, struct machine *machine,
				union perf_event *event, struct perf_sample *sample)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(trace->output, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event, sample);
		break;
	default:
		ret = machine__process_event(machine, event, sample);
		break;
	}

	return ret;
}
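/*
 * Lost records are reported loudly (in red) before being handed to the
 * machine layer: a dropped sys_enter leaves its sys_exit without an entry
 * timestamp (cf. the trace__fprintf_tstamp() comment above).
 */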
struct trace, tool); 1787 return trace__process_event(trace, machine, event, sample); 1788 } 1789 1790 static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp) 1791 { 1792 struct machine *machine = vmachine; 1793 1794 if (machine->kptr_restrict_warned) 1795 return NULL; 1796 1797 if (symbol_conf.kptr_restrict) { 1798 pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n" 1799 "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n" 1800 "Kernel samples will not be resolved.\n"); 1801 machine->kptr_restrict_warned = true; 1802 return NULL; 1803 } 1804 1805 return machine__resolve_kernel_addr(vmachine, addrp, modp); 1806 } 1807 1808 static int trace__symbols_init(struct trace *trace, struct evlist *evlist) 1809 { 1810 int err = symbol__init(NULL); 1811 1812 if (err) 1813 return err; 1814 1815 trace->host = machine__new_host(); 1816 if (trace->host == NULL) 1817 return -ENOMEM; 1818 1819 thread__set_priv_destructor(thread_trace__delete); 1820 1821 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr); 1822 if (err < 0) 1823 goto out; 1824 1825 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, 1826 evlist->core.threads, trace__tool_process, 1827 true, false, 1); 1828 out: 1829 if (err) 1830 symbol__exit(); 1831 1832 return err; 1833 } 1834 1835 static void trace__symbols__exit(struct trace *trace) 1836 { 1837 machine__exit(trace->host); 1838 trace->host = NULL; 1839 1840 symbol__exit(); 1841 } 1842 1843 static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args) 1844 { 1845 int idx; 1846 1847 if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0) 1848 nr_args = sc->fmt->nr_args; 1849 1850 sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt)); 1851 if (sc->arg_fmt == NULL) 1852 return -1; 1853 1854 for (idx = 0; idx < nr_args; ++idx) { 1855 if (sc->fmt) 1856 sc->arg_fmt[idx] = sc->fmt->arg[idx]; 1857 } 1858 1859 sc->nr_args = nr_args; 1860 return 0; 1861 } 1862 1863 static const struct syscall_arg_fmt syscall_arg_fmts__by_name[] = { 1864 { .name = "msr", .scnprintf = SCA_X86_MSR, .strtoul = STUL_X86_MSR, }, 1865 { .name = "vector", .scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, }, 1866 }; 1867 1868 static int syscall_arg_fmt__cmp(const void *name, const void *fmtp) 1869 { 1870 const struct syscall_arg_fmt *fmt = fmtp; 1871 return strcmp(name, fmt->name); 1872 } 1873 1874 static const struct syscall_arg_fmt * 1875 __syscall_arg_fmt__find_by_name(const struct syscall_arg_fmt *fmts, const int nmemb, 1876 const char *name) 1877 { 1878 return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp); 1879 } 1880 1881 static const struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name) 1882 { 1883 const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name); 1884 return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name); 1885 } 1886 1887 static struct tep_format_field * 1888 syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field, 1889 bool *use_btf) 1890 { 1891 struct tep_format_field *last_field = NULL; 1892 int len; 1893 1894 for (; field; field = field->next, ++arg) { 1895 last_field = field; 1896 1897 if (arg->scnprintf) 1898 continue; 1899 1900 len = strlen(field->name); 1901 1902 if (strcmp(field->type, "const char *") == 0 && 1903 ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) || 1904 
							    strstr(field->name, "path") != NULL))
			arg->scnprintf = SCA_FILENAME;
		else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
			arg->scnprintf = SCA_PTR;
		else if (strcmp(field->type, "pid_t") == 0)
			arg->scnprintf = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			arg->scnprintf = SCA_MODE_T;
		else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) {
			arg->scnprintf = SCA_CHAR_ARRAY;
			arg->nr_entries = field->arraylen;
		} else if ((strcmp(field->type, "int") == 0 ||
			    strcmp(field->type, "unsigned int") == 0 ||
			    strcmp(field->type, "long") == 0) &&
			   len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
			/*
			 * /sys/kernel/tracing/events/syscalls/sys_enter*
			 * grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
			 * 65 int
			 * 23 unsigned int
			 * 7 unsigned long
			 */
			arg->scnprintf = SCA_FD;
		} else if (strstr(field->type, "enum") && use_btf != NULL) {
			*use_btf = true;
			arg->strtoul = STUL_BTF_TYPE;
		} else {
			const struct syscall_arg_fmt *fmt =
				syscall_arg_fmt__find_by_name(field->name);

			if (fmt) {
				arg->scnprintf = fmt->scnprintf;
				arg->strtoul = fmt->strtoul;
			}
		}
	}

	return last_field;
}

static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args,
									  &sc->use_btf);

	if (last_field)
		sc->args_size = last_field->offset + last_field->size;

	return 0;
}

static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = syscalltbl__name(trace->sctbl, id);
	int err;

#ifdef HAVE_SYSCALL_TABLE_SUPPORT
	if (trace->syscalls.table == NULL) {
		trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
		if (trace->syscalls.table == NULL)
			return -ENOMEM;
	}
#else
	if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
		// When using libaudit we don't know beforehand what is the max syscall id
		struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (table == NULL)
			return -ENOMEM;

		// Zero the whole table if brand new, otherwise just the newly grown tail
		if (trace->syscalls.table == NULL)
			memset(table, 0, (id + 1) * sizeof(*sc));
		else
			memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));

		trace->syscalls.table = table;
		trace->sctbl->syscalls.max_id = id;
	}
#endif
	sc = trace->syscalls.table + id;
	if (sc->nonexistent)
		return -EEXIST;

	if (name == NULL) {
		sc->nonexistent = true;
		return -EEXIST;
	}

	sc->name = name;
	sc->fmt = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
	}

	/*
	 * If we fail to read the tracepoint format via its sysfs node, the
	 * tracepoint doesn't exist, so set the 'nonexistent' flag to true.
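	 *
	 * E.g. a syscall table entry for which there is no
	 * /sys/kernel/tracing/events/syscalls/sys_enter_<name> directory,
	 * making the trace_event__tp_format() calls above return an error
	 * pointer.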
	 */
	if (IS_ERR(sc->tp_format)) {
		sc->nonexistent = true;
		return PTR_ERR(sc->tp_format);
	}

	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
					RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
		return -ENOMEM;

	sc->args = sc->tp_format->format.fields;
	/*
	 * Check and discard the first field, '__syscall_nr' (or just 'nr' on
	 * older kernels), which holds the syscall number and is not a real
	 * syscall argument, so it is not needed here.
	 */
	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
		sc->args = sc->args->next;
		--sc->nr_args;
	}

	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
	sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");

	err = syscall__set_arg_fmts(sc);

	/* after calling syscall__set_arg_fmts() we'll know whether use_btf is true */
	if (sc->use_btf)
		trace__load_vmlinux_btf(trace);

	return err;
}

static int evsel__init_tp_arg_scnprintf(struct evsel *evsel, bool *use_btf)
{
	struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);

	if (fmt != NULL) {
		syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields, use_btf);
		return 0;
	}

	return -ENOMEM;
}

static int intcmp(const void *a, const void *b)
{
	const int *one = a, *another = b;

	return *one - *another;
}

static int trace__validate_ev_qualifier(struct trace *trace)
{
	int err = 0;
	bool printed_invalid_prefix = false;
	struct str_node *pos;
	size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);

	trace->ev_qualifier_ids.entries = malloc(nr_allocated *
						 sizeof(trace->ev_qualifier_ids.entries[0]));

	if (trace->ev_qualifier_ids.entries == NULL) {
		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
		      trace->output);
		err = -EINVAL;
		goto out;
	}

	strlist__for_each_entry(pos, trace->ev_qualifier) {
		const char *sc = pos->s;
		int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;

		if (id < 0) {
			id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
			if (id >= 0)
				goto matches;

			if (!printed_invalid_prefix) {
				pr_debug("Skipping unknown syscalls: ");
				printed_invalid_prefix = true;
			} else {
				pr_debug(", ");
			}

			pr_debug("%s", sc);
			continue;
		}
matches:
		trace->ev_qualifier_ids.entries[nr_used++] = id;
		if (match_next == -1)
			continue;

		while (1) {
			id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
			if (id < 0)
				break;
			if (nr_allocated == nr_used) {
				void *entries;

				nr_allocated += 8;
				entries = realloc(trace->ev_qualifier_ids.entries,
						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
				if (entries == NULL) {
					err = -ENOMEM;
					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
					goto out_free;
				}
				trace->ev_qualifier_ids.entries = entries;
			}
			trace->ev_qualifier_ids.entries[nr_used++] = id;
		}
	}

	trace->ev_qualifier_ids.nr = nr_used;
	qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
out:
	if (printed_invalid_prefix)
		pr_debug("\n");
	return err;
out_free:
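	/* Undo the partial glob expansion: drop whatever ids were gathered so far. */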
zfree(&trace->ev_qualifier_ids.entries); 2131 trace->ev_qualifier_ids.nr = 0; 2132 goto out; 2133 } 2134 2135 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id) 2136 { 2137 bool in_ev_qualifier; 2138 2139 if (trace->ev_qualifier_ids.nr == 0) 2140 return true; 2141 2142 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries, 2143 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL; 2144 2145 if (in_ev_qualifier) 2146 return !trace->not_ev_qualifier; 2147 2148 return trace->not_ev_qualifier; 2149 } 2150 2151 /* 2152 * args is to be interpreted as a series of longs but we need to handle 2153 * 8-byte unaligned accesses. args points to raw_data within the event 2154 * and raw_data is guaranteed to be 8-byte unaligned because it is 2155 * preceded by raw_size which is a u32. So we need to copy args to a temp 2156 * variable to read it. Most notably this avoids extended load instructions 2157 * on unaligned addresses 2158 */ 2159 unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx) 2160 { 2161 unsigned long val; 2162 unsigned char *p = arg->args + sizeof(unsigned long) * idx; 2163 2164 memcpy(&val, p, sizeof(val)); 2165 return val; 2166 } 2167 2168 static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size, 2169 struct syscall_arg *arg) 2170 { 2171 if (sc->arg_fmt && sc->arg_fmt[arg->idx].name) 2172 return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name); 2173 2174 return scnprintf(bf, size, "arg%d: ", arg->idx); 2175 } 2176 2177 /* 2178 * Check if the value is in fact zero, i.e. mask whatever needs masking, such 2179 * as mount 'flags' argument that needs ignoring some magic flag, see comment 2180 * in tools/perf/trace/beauty/mount_flags.c 2181 */ 2182 static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val) 2183 { 2184 if (fmt && fmt->mask_val) 2185 return fmt->mask_val(arg, val); 2186 2187 return val; 2188 } 2189 2190 static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size, 2191 struct syscall_arg *arg, unsigned long val) 2192 { 2193 if (fmt && fmt->scnprintf) { 2194 arg->val = val; 2195 if (fmt->parm) 2196 arg->parm = fmt->parm; 2197 return fmt->scnprintf(bf, size, arg); 2198 } 2199 return scnprintf(bf, size, "%ld", val); 2200 } 2201 2202 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size, 2203 unsigned char *args, void *augmented_args, int augmented_args_size, 2204 struct trace *trace, struct thread *thread) 2205 { 2206 size_t printed = 0, btf_printed; 2207 unsigned long val; 2208 u8 bit = 1; 2209 struct syscall_arg arg = { 2210 .args = args, 2211 .augmented = { 2212 .size = augmented_args_size, 2213 .args = augmented_args, 2214 }, 2215 .idx = 0, 2216 .mask = 0, 2217 .trace = trace, 2218 .thread = thread, 2219 .show_string_prefix = trace->show_string_prefix, 2220 }; 2221 struct thread_trace *ttrace = thread__priv(thread); 2222 2223 /* 2224 * Things like fcntl will set this in its 'cmd' formatter to pick the 2225 * right formatter for the return value (an fd? file flags?), which is 2226 * not needed for syscalls that always return a given type, say an fd. 
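	 *
	 * E.g. a 'cmd' beautifier can arrange for fcntl(fd, F_DUPFD, ...) to
	 * have its return value printed as a file descriptor while leaving,
	 * say, F_GETFD alone; a minimal sketch of such a formatter (the exact
	 * helper the fcntl beautifier uses may differ):
	 *
	 *	if (cmd == F_DUPFD)
	 *		ttrace->ret_scnprintf = syscall_arg__scnprintf_fd;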
2227 */ 2228 ttrace->ret_scnprintf = NULL; 2229 2230 if (sc->args != NULL) { 2231 struct tep_format_field *field; 2232 2233 for (field = sc->args; field; 2234 field = field->next, ++arg.idx, bit <<= 1) { 2235 if (arg.mask & bit) 2236 continue; 2237 2238 arg.fmt = &sc->arg_fmt[arg.idx]; 2239 val = syscall_arg__val(&arg, arg.idx); 2240 /* 2241 * Some syscall args need some mask, most don't and 2242 * return val untouched. 2243 */ 2244 val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val); 2245 2246 /* 2247 * Suppress this argument if its value is zero and show_zero 2248 * property isn't set. 2249 * 2250 * If it has a BTF type, then override the zero suppression knob 2251 * as the common case is for zero in an enum to have an associated entry. 2252 */ 2253 if (val == 0 && !trace->show_zeros && 2254 !(sc->arg_fmt && sc->arg_fmt[arg.idx].show_zero) && 2255 !(sc->arg_fmt && sc->arg_fmt[arg.idx].strtoul == STUL_BTF_TYPE)) 2256 continue; 2257 2258 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); 2259 2260 if (trace->show_arg_names) 2261 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); 2262 2263 btf_printed = trace__btf_scnprintf(trace, &sc->arg_fmt[arg.idx], bf + printed, 2264 size - printed, val, field->type); 2265 if (btf_printed) { 2266 printed += btf_printed; 2267 continue; 2268 } 2269 2270 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], 2271 bf + printed, size - printed, &arg, val); 2272 } 2273 } else if (IS_ERR(sc->tp_format)) { 2274 /* 2275 * If we managed to read the tracepoint /format file, then we 2276 * may end up not having any args, like with gettid(), so only 2277 * print the raw args when we didn't manage to read it. 2278 */ 2279 while (arg.idx < sc->nr_args) { 2280 if (arg.mask & bit) 2281 goto next_arg; 2282 val = syscall_arg__val(&arg, arg.idx); 2283 if (printed) 2284 printed += scnprintf(bf + printed, size - printed, ", "); 2285 printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg); 2286 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val); 2287 next_arg: 2288 ++arg.idx; 2289 bit <<= 1; 2290 } 2291 } 2292 2293 return printed; 2294 } 2295 2296 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel, 2297 union perf_event *event, 2298 struct perf_sample *sample); 2299 2300 static struct syscall *trace__syscall_info(struct trace *trace, 2301 struct evsel *evsel, int id) 2302 { 2303 int err = 0; 2304 2305 if (id < 0) { 2306 2307 /* 2308 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried 2309 * before that, leaving at a higher verbosity level till that is 2310 * explained. Reproduced with plain ftrace with: 2311 * 2312 * echo 1 > /t/events/raw_syscalls/sys_exit/enable 2313 * grep "NR -1 " /t/trace_pipe 2314 * 2315 * After generating some load on the machine. 2316 */ 2317 if (verbose > 1) { 2318 static u64 n; 2319 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n", 2320 id, evsel__name(evsel), ++n); 2321 } 2322 return NULL; 2323 } 2324 2325 err = -EINVAL; 2326 2327 #ifdef HAVE_SYSCALL_TABLE_SUPPORT 2328 if (id > trace->sctbl->syscalls.max_id) { 2329 #else 2330 if (id >= trace->sctbl->syscalls.max_id) { 2331 /* 2332 * With libaudit we don't know beforehand what is the max_id, 2333 * so we let trace__read_syscall_info() figure that out as we 2334 * go on reading syscalls. 
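	 *
	 * E.g. if the table so far goes up to max_id 300 and a sample for
	 * id 313 arrives, trace__read_syscall_info() grows the table to 314
	 * entries and zeroes the newly added slots (numbers illustrative).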
2335 */ 2336 err = trace__read_syscall_info(trace, id); 2337 if (err) 2338 #endif 2339 goto out_cant_read; 2340 } 2341 2342 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) && 2343 (err = trace__read_syscall_info(trace, id)) != 0) 2344 goto out_cant_read; 2345 2346 if (trace->syscalls.table && trace->syscalls.table[id].nonexistent) 2347 goto out_cant_read; 2348 2349 return &trace->syscalls.table[id]; 2350 2351 out_cant_read: 2352 if (verbose > 0) { 2353 char sbuf[STRERR_BUFSIZE]; 2354 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf))); 2355 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL) 2356 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name); 2357 fputs(" information\n", trace->output); 2358 } 2359 return NULL; 2360 } 2361 2362 struct syscall_stats { 2363 struct stats stats; 2364 u64 nr_failures; 2365 int max_errno; 2366 u32 *errnos; 2367 }; 2368 2369 static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace, 2370 int id, struct perf_sample *sample, long err, bool errno_summary) 2371 { 2372 struct int_node *inode; 2373 struct syscall_stats *stats; 2374 u64 duration = 0; 2375 2376 inode = intlist__findnew(ttrace->syscall_stats, id); 2377 if (inode == NULL) 2378 return; 2379 2380 stats = inode->priv; 2381 if (stats == NULL) { 2382 stats = zalloc(sizeof(*stats)); 2383 if (stats == NULL) 2384 return; 2385 2386 init_stats(&stats->stats); 2387 inode->priv = stats; 2388 } 2389 2390 if (ttrace->entry_time && sample->time > ttrace->entry_time) 2391 duration = sample->time - ttrace->entry_time; 2392 2393 update_stats(&stats->stats, duration); 2394 2395 if (err < 0) { 2396 ++stats->nr_failures; 2397 2398 if (!errno_summary) 2399 return; 2400 2401 err = -err; 2402 if (err > stats->max_errno) { 2403 u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32)); 2404 2405 if (new_errnos) { 2406 memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32)); 2407 } else { 2408 pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n", 2409 thread__comm_str(thread), thread__pid(thread), 2410 thread__tid(thread)); 2411 return; 2412 } 2413 2414 stats->errnos = new_errnos; 2415 stats->max_errno = err; 2416 } 2417 2418 ++stats->errnos[err - 1]; 2419 } 2420 } 2421 2422 static int trace__printf_interrupted_entry(struct trace *trace) 2423 { 2424 struct thread_trace *ttrace; 2425 size_t printed; 2426 int len; 2427 2428 if (trace->failure_only || trace->current == NULL) 2429 return 0; 2430 2431 ttrace = thread__priv(trace->current); 2432 2433 if (!ttrace->entry_pending) 2434 return 0; 2435 2436 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output); 2437 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str); 2438 2439 if (len < trace->args_alignment - 4) 2440 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " "); 2441 2442 printed += fprintf(trace->output, " ...\n"); 2443 2444 ttrace->entry_pending = false; 2445 ++trace->nr_events_printed; 2446 2447 return printed; 2448 } 2449 2450 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel, 2451 struct perf_sample *sample, struct thread *thread) 2452 { 2453 int printed = 0; 2454 2455 if (trace->print_sample) { 2456 double ts = (double)sample->time / NSEC_PER_MSEC; 2457 2458 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n", 2459 
				   evsel__name(evsel), ts,
				   thread__comm_str(thread),
				   sample->pid, sample->tid, sample->cpu);
	}

	return printed;
}

static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
{
	void *augmented_args = NULL;
	/*
	 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
	 * and there we get all 6 syscall args plus the tracepoint common fields
	 * that get calculated at the start and the syscall_nr (another long).
	 * So we check if that is the case and if so don't look after the
	 * sc->args_size but always after the full raw_syscalls:sys_enter payload,
	 * which is fixed.
	 *
	 * We'll revisit this later to pass sc->args_size to the BPF augmenter
	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
	 * copies only what we need for each syscall, like what happens when we
	 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
	 * traffic to just what is needed for each syscall.
	 */
	int args_size = raw_augmented_args_size ?: sc->args_size;

	*augmented_args_size = sample->raw_size - args_size;
	if (*augmented_args_size > 0)
		augmented_args = sample->raw_data + args_size;

	return augmented_args;
}

static void syscall__exit(struct syscall *sc)
{
	if (!sc)
		return;

	zfree(&sc->arg_fmt);
}

static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	int printed = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	int augmented_args_size = 0;
	void *augmented_args = NULL;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)
			goto out_put;
	}

	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
		trace__printf_interrupted_entry(trace);
	/*
	 * If this is raw_syscalls:sys_enter, then it always comes with the 6
	 * possible arguments, even if the syscall being handled, say "openat",
	 * uses only 4. This breaks the syscall__augmented_args() check for
	 * augmented args, since we calculate syscall->args_size from each
	 * syscalls:sys_enter_NAME tracefs format file: when handling, say, the
	 * openat syscall we would get 6 args for the raw_syscalls:sys_enter
	 * event when we expected just 4, and would mistakenly take the extra
	 * 2 u64 args for the augmented filename. So just check here and avoid
	 * using augmented syscalls when the evsel is the raw_syscalls one.
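	 *
	 * A worked example (sizes illustrative): openat's 4 args span 32
	 * bytes, while the fixed raw_syscalls:sys_enter payload always
	 * carries 6 longs (48 bytes), so the 16 trailing bytes would be
	 * misread as an augmented string payload.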
	 */
	if (evsel != trace->syscalls.events.sys_enter)
		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, augmented_args, augmented_args_size, trace, thread);

	if (sc->is_exit) {
		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
			int alignment = 0;

			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
			printed = fprintf(trace->output, "%s)", ttrace->entry_str);
			if (trace->args_alignment > printed)
				alignment = trace->args_alignment - printed;
			fprintf(trace->output, "%*s= ?\n", alignment, " ");
		}
	} else {
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;
	}

	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);
	}
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample)
{
	struct thread_trace *ttrace;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	char msg[1024];
	void *args, *augmented_args = NULL;
	int augmented_args_size;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	/*
	 * We need to get ttrace just to make sure it is there when
	 * syscall__scnprintf_args() and the rest of the beautifiers access it
	 * via struct syscall_arg.
	 */
	if (ttrace == NULL)
		goto out_put;

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
	augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
	fprintf(trace->output, "%s", msg);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample,
				    struct callchain_cursor *cursor)
{
	struct addr_location al;
	int max_stack = evsel->core.attr.sample_max_stack ?
2619 evsel->core.attr.sample_max_stack : 2620 trace->max_stack; 2621 int err = -1; 2622 2623 addr_location__init(&al); 2624 if (machine__resolve(trace->host, &al, sample) < 0) 2625 goto out; 2626 2627 err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack); 2628 out: 2629 addr_location__exit(&al); 2630 return err; 2631 } 2632 2633 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample) 2634 { 2635 /* TODO: user-configurable print_opts */ 2636 const unsigned int print_opts = EVSEL__PRINT_SYM | 2637 EVSEL__PRINT_DSO | 2638 EVSEL__PRINT_UNKNOWN_AS_ADDR; 2639 2640 return sample__fprintf_callchain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output); 2641 } 2642 2643 static const char *errno_to_name(struct evsel *evsel, int err) 2644 { 2645 struct perf_env *env = evsel__env(evsel); 2646 2647 return perf_env__arch_strerrno(env, err); 2648 } 2649 2650 static int trace__sys_exit(struct trace *trace, struct evsel *evsel, 2651 union perf_event *event __maybe_unused, 2652 struct perf_sample *sample) 2653 { 2654 long ret; 2655 u64 duration = 0; 2656 bool duration_calculated = false; 2657 struct thread *thread; 2658 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0; 2659 int alignment = trace->args_alignment; 2660 struct syscall *sc = trace__syscall_info(trace, evsel, id); 2661 struct thread_trace *ttrace; 2662 2663 if (sc == NULL) 2664 return -1; 2665 2666 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2667 ttrace = thread__trace(thread, trace->output); 2668 if (ttrace == NULL) 2669 goto out_put; 2670 2671 trace__fprintf_sample(trace, evsel, sample, thread); 2672 2673 ret = perf_evsel__sc_tp_uint(evsel, ret, sample); 2674 2675 if (trace->summary) 2676 thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary); 2677 2678 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) { 2679 trace__set_fd_pathname(thread, ret, ttrace->filename.name); 2680 ttrace->filename.pending_open = false; 2681 ++trace->stats.vfs_getname; 2682 } 2683 2684 if (ttrace->entry_time) { 2685 duration = sample->time - ttrace->entry_time; 2686 if (trace__filter_duration(trace, duration)) 2687 goto out; 2688 duration_calculated = true; 2689 } else if (trace->duration_filter) 2690 goto out; 2691 2692 if (sample->callchain) { 2693 struct callchain_cursor *cursor = get_tls_callchain_cursor(); 2694 2695 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); 2696 if (callchain_ret == 0) { 2697 if (cursor->nr < trace->min_stack) 2698 goto out; 2699 callchain_ret = 1; 2700 } 2701 } 2702 2703 if (trace->summary_only || (ret >= 0 && trace->failure_only)) 2704 goto out; 2705 2706 trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output); 2707 2708 if (ttrace->entry_pending) { 2709 printed = fprintf(trace->output, "%s", ttrace->entry_str); 2710 } else { 2711 printed += fprintf(trace->output, " ... 
["); 2712 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued"); 2713 printed += 9; 2714 printed += fprintf(trace->output, "]: %s()", sc->name); 2715 } 2716 2717 printed++; /* the closing ')' */ 2718 2719 if (alignment > printed) 2720 alignment -= printed; 2721 else 2722 alignment = 0; 2723 2724 fprintf(trace->output, ")%*s= ", alignment, " "); 2725 2726 if (sc->fmt == NULL) { 2727 if (ret < 0) 2728 goto errno_print; 2729 signed_print: 2730 fprintf(trace->output, "%ld", ret); 2731 } else if (ret < 0) { 2732 errno_print: { 2733 char bf[STRERR_BUFSIZE]; 2734 const char *emsg = str_error_r(-ret, bf, sizeof(bf)), 2735 *e = errno_to_name(evsel, -ret); 2736 2737 fprintf(trace->output, "-1 %s (%s)", e, emsg); 2738 } 2739 } else if (ret == 0 && sc->fmt->timeout) 2740 fprintf(trace->output, "0 (Timeout)"); 2741 else if (ttrace->ret_scnprintf) { 2742 char bf[1024]; 2743 struct syscall_arg arg = { 2744 .val = ret, 2745 .thread = thread, 2746 .trace = trace, 2747 }; 2748 ttrace->ret_scnprintf(bf, sizeof(bf), &arg); 2749 ttrace->ret_scnprintf = NULL; 2750 fprintf(trace->output, "%s", bf); 2751 } else if (sc->fmt->hexret) 2752 fprintf(trace->output, "%#lx", ret); 2753 else if (sc->fmt->errpid) { 2754 struct thread *child = machine__find_thread(trace->host, ret, ret); 2755 2756 if (child != NULL) { 2757 fprintf(trace->output, "%ld", ret); 2758 if (thread__comm_set(child)) 2759 fprintf(trace->output, " (%s)", thread__comm_str(child)); 2760 thread__put(child); 2761 } 2762 } else 2763 goto signed_print; 2764 2765 fputc('\n', trace->output); 2766 2767 /* 2768 * We only consider an 'event' for the sake of --max-events a non-filtered 2769 * sys_enter + sys_exit and other tracepoint events. 2770 */ 2771 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX) 2772 interrupted = true; 2773 2774 if (callchain_ret > 0) 2775 trace__fprintf_callchain(trace, sample); 2776 else if (callchain_ret < 0) 2777 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 2778 out: 2779 ttrace->entry_pending = false; 2780 err = 0; 2781 out_put: 2782 thread__put(thread); 2783 return err; 2784 } 2785 2786 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel, 2787 union perf_event *event __maybe_unused, 2788 struct perf_sample *sample) 2789 { 2790 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2791 struct thread_trace *ttrace; 2792 size_t filename_len, entry_str_len, to_move; 2793 ssize_t remaining_space; 2794 char *pos; 2795 const char *filename = evsel__rawptr(evsel, sample, "pathname"); 2796 2797 if (!thread) 2798 goto out; 2799 2800 ttrace = thread__priv(thread); 2801 if (!ttrace) 2802 goto out_put; 2803 2804 filename_len = strlen(filename); 2805 if (filename_len == 0) 2806 goto out_put; 2807 2808 if (ttrace->filename.namelen < filename_len) { 2809 char *f = realloc(ttrace->filename.name, filename_len + 1); 2810 2811 if (f == NULL) 2812 goto out_put; 2813 2814 ttrace->filename.namelen = filename_len; 2815 ttrace->filename.name = f; 2816 } 2817 2818 strcpy(ttrace->filename.name, filename); 2819 ttrace->filename.pending_open = true; 2820 2821 if (!ttrace->filename.ptr) 2822 goto out_put; 2823 2824 entry_str_len = strlen(ttrace->entry_str); 2825 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */ 2826 if (remaining_space <= 0) 2827 goto out_put; 2828 2829 if (filename_len > (size_t)remaining_space) { 2830 filename += filename_len - remaining_space; 2831 filename_len = remaining_space; 2832 } 2833 
2834 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */ 2835 pos = ttrace->entry_str + ttrace->filename.entry_str_pos; 2836 memmove(pos + filename_len, pos, to_move); 2837 memcpy(pos, filename, filename_len); 2838 2839 ttrace->filename.ptr = 0; 2840 ttrace->filename.entry_str_pos = 0; 2841 out_put: 2842 thread__put(thread); 2843 out: 2844 return 0; 2845 } 2846 2847 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel, 2848 union perf_event *event __maybe_unused, 2849 struct perf_sample *sample) 2850 { 2851 u64 runtime = evsel__intval(evsel, sample, "runtime"); 2852 double runtime_ms = (double)runtime / NSEC_PER_MSEC; 2853 struct thread *thread = machine__findnew_thread(trace->host, 2854 sample->pid, 2855 sample->tid); 2856 struct thread_trace *ttrace = thread__trace(thread, trace->output); 2857 2858 if (ttrace == NULL) 2859 goto out_dump; 2860 2861 ttrace->runtime_ms += runtime_ms; 2862 trace->runtime_ms += runtime_ms; 2863 out_put: 2864 thread__put(thread); 2865 return 0; 2866 2867 out_dump: 2868 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n", 2869 evsel->name, 2870 evsel__strval(evsel, sample, "comm"), 2871 (pid_t)evsel__intval(evsel, sample, "pid"), 2872 runtime, 2873 evsel__intval(evsel, sample, "vruntime")); 2874 goto out_put; 2875 } 2876 2877 static int bpf_output__printer(enum binary_printer_ops op, 2878 unsigned int val, void *extra __maybe_unused, FILE *fp) 2879 { 2880 unsigned char ch = (unsigned char)val; 2881 2882 switch (op) { 2883 case BINARY_PRINT_CHAR_DATA: 2884 return fprintf(fp, "%c", isprint(ch) ? ch : '.'); 2885 case BINARY_PRINT_DATA_BEGIN: 2886 case BINARY_PRINT_LINE_BEGIN: 2887 case BINARY_PRINT_ADDR: 2888 case BINARY_PRINT_NUM_DATA: 2889 case BINARY_PRINT_NUM_PAD: 2890 case BINARY_PRINT_SEP: 2891 case BINARY_PRINT_CHAR_PAD: 2892 case BINARY_PRINT_LINE_END: 2893 case BINARY_PRINT_DATA_END: 2894 default: 2895 break; 2896 } 2897 2898 return 0; 2899 } 2900 2901 static void bpf_output__fprintf(struct trace *trace, 2902 struct perf_sample *sample) 2903 { 2904 binary__fprintf(sample->raw_data, sample->raw_size, 8, 2905 bpf_output__printer, NULL, trace->output); 2906 ++trace->nr_events_printed; 2907 } 2908 2909 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample, 2910 struct thread *thread, void *augmented_args, int augmented_args_size) 2911 { 2912 char bf[2048]; 2913 size_t size = sizeof(bf); 2914 struct tep_format_field *field = evsel->tp_format->format.fields; 2915 struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel); 2916 size_t printed = 0, btf_printed; 2917 unsigned long val; 2918 u8 bit = 1; 2919 struct syscall_arg syscall_arg = { 2920 .augmented = { 2921 .size = augmented_args_size, 2922 .args = augmented_args, 2923 }, 2924 .idx = 0, 2925 .mask = 0, 2926 .trace = trace, 2927 .thread = thread, 2928 .show_string_prefix = trace->show_string_prefix, 2929 }; 2930 2931 for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) { 2932 if (syscall_arg.mask & bit) 2933 continue; 2934 2935 syscall_arg.len = 0; 2936 syscall_arg.fmt = arg; 2937 if (field->flags & TEP_FIELD_IS_ARRAY) { 2938 int offset = field->offset; 2939 2940 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 2941 offset = format_field__intval(field, sample, evsel->needs_swap); 2942 syscall_arg.len = offset >> 16; 2943 offset &= 0xffff; 2944 if (tep_field_is_relative(field->flags)) 2945 offset += field->offset + field->size; 2946 } 2947 2948 val = 
(uintptr_t)(sample->raw_data + offset); 2949 } else 2950 val = format_field__intval(field, sample, evsel->needs_swap); 2951 /* 2952 * Some syscall args need some mask, most don't and 2953 * return val untouched. 2954 */ 2955 val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val); 2956 2957 /* Suppress this argument if its value is zero and show_zero property isn't set. */ 2958 if (val == 0 && !trace->show_zeros && !arg->show_zero && arg->strtoul != STUL_BTF_TYPE) 2959 continue; 2960 2961 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); 2962 2963 if (trace->show_arg_names) 2964 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); 2965 2966 btf_printed = trace__btf_scnprintf(trace, arg, bf + printed, size - printed, val, field->type); 2967 if (btf_printed) { 2968 printed += btf_printed; 2969 continue; 2970 } 2971 2972 printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val); 2973 } 2974 2975 return printed + fprintf(trace->output, "%s", bf); 2976 } 2977 2978 static int trace__event_handler(struct trace *trace, struct evsel *evsel, 2979 union perf_event *event __maybe_unused, 2980 struct perf_sample *sample) 2981 { 2982 struct thread *thread; 2983 int callchain_ret = 0; 2984 /* 2985 * Check if we called perf_evsel__disable(evsel) due to, for instance, 2986 * this event's max_events having been hit and this is an entry coming 2987 * from the ring buffer that we should discard, since the max events 2988 * have already been considered/printed. 2989 */ 2990 if (evsel->disabled) 2991 return 0; 2992 2993 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2994 2995 if (sample->callchain) { 2996 struct callchain_cursor *cursor = get_tls_callchain_cursor(); 2997 2998 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); 2999 if (callchain_ret == 0) { 3000 if (cursor->nr < trace->min_stack) 3001 goto out; 3002 callchain_ret = 1; 3003 } 3004 } 3005 3006 trace__printf_interrupted_entry(trace); 3007 trace__fprintf_tstamp(trace, sample->time, trace->output); 3008 3009 if (trace->trace_syscalls && trace->show_duration) 3010 fprintf(trace->output, "( ): "); 3011 3012 if (thread) 3013 trace__fprintf_comm_tid(trace, thread, trace->output); 3014 3015 if (evsel == trace->syscalls.events.bpf_output) { 3016 int id = perf_evsel__sc_tp_uint(evsel, id, sample); 3017 struct syscall *sc = trace__syscall_info(trace, evsel, id); 3018 3019 if (sc) { 3020 fprintf(trace->output, "%s(", sc->name); 3021 trace__fprintf_sys_enter(trace, evsel, sample); 3022 fputc(')', trace->output); 3023 goto newline; 3024 } 3025 3026 /* 3027 * XXX: Not having the associated syscall info or not finding/adding 3028 * the thread should never happen, but if it does... 3029 * fall thru and print it as a bpf_output event. 
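		 *
		 * In that case the line degrades to a raw hexdump-style
		 * print, roughly (event name assuming the
		 * augmented_raw_syscalls BPF skel's bpf-output event):
		 *
		 *	__augmented_syscalls__(.. raw bytes ..)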
3030 */ 3031 } 3032 3033 fprintf(trace->output, "%s(", evsel->name); 3034 3035 if (evsel__is_bpf_output(evsel)) { 3036 bpf_output__fprintf(trace, sample); 3037 } else if (evsel->tp_format) { 3038 if (strncmp(evsel->tp_format->name, "sys_enter_", 10) || 3039 trace__fprintf_sys_enter(trace, evsel, sample)) { 3040 if (trace->libtraceevent_print) { 3041 event_format__fprintf(evsel->tp_format, sample->cpu, 3042 sample->raw_data, sample->raw_size, 3043 trace->output); 3044 } else { 3045 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0); 3046 } 3047 } 3048 } 3049 3050 newline: 3051 fprintf(trace->output, ")\n"); 3052 3053 if (callchain_ret > 0) 3054 trace__fprintf_callchain(trace, sample); 3055 else if (callchain_ret < 0) 3056 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 3057 3058 ++trace->nr_events_printed; 3059 3060 if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) { 3061 evsel__disable(evsel); 3062 evsel__close(evsel); 3063 } 3064 out: 3065 thread__put(thread); 3066 return 0; 3067 } 3068 3069 static void print_location(FILE *f, struct perf_sample *sample, 3070 struct addr_location *al, 3071 bool print_dso, bool print_sym) 3072 { 3073 3074 if ((verbose > 0 || print_dso) && al->map) 3075 fprintf(f, "%s@", dso__long_name(map__dso(al->map))); 3076 3077 if ((verbose > 0 || print_sym) && al->sym) 3078 fprintf(f, "%s+0x%" PRIx64, al->sym->name, 3079 al->addr - al->sym->start); 3080 else if (al->map) 3081 fprintf(f, "0x%" PRIx64, al->addr); 3082 else 3083 fprintf(f, "0x%" PRIx64, sample->addr); 3084 } 3085 3086 static int trace__pgfault(struct trace *trace, 3087 struct evsel *evsel, 3088 union perf_event *event __maybe_unused, 3089 struct perf_sample *sample) 3090 { 3091 struct thread *thread; 3092 struct addr_location al; 3093 char map_type = 'd'; 3094 struct thread_trace *ttrace; 3095 int err = -1; 3096 int callchain_ret = 0; 3097 3098 addr_location__init(&al); 3099 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 3100 3101 if (sample->callchain) { 3102 struct callchain_cursor *cursor = get_tls_callchain_cursor(); 3103 3104 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); 3105 if (callchain_ret == 0) { 3106 if (cursor->nr < trace->min_stack) 3107 goto out_put; 3108 callchain_ret = 1; 3109 } 3110 } 3111 3112 ttrace = thread__trace(thread, trace->output); 3113 if (ttrace == NULL) 3114 goto out_put; 3115 3116 if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ) 3117 ttrace->pfmaj++; 3118 else 3119 ttrace->pfmin++; 3120 3121 if (trace->summary_only) 3122 goto out; 3123 3124 thread__find_symbol(thread, sample->cpumode, sample->ip, &al); 3125 3126 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output); 3127 3128 fprintf(trace->output, "%sfault [", 3129 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ? 
3130 "maj" : "min"); 3131 3132 print_location(trace->output, sample, &al, false, true); 3133 3134 fprintf(trace->output, "] => "); 3135 3136 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); 3137 3138 if (!al.map) { 3139 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); 3140 3141 if (al.map) 3142 map_type = 'x'; 3143 else 3144 map_type = '?'; 3145 } 3146 3147 print_location(trace->output, sample, &al, true, false); 3148 3149 fprintf(trace->output, " (%c%c)\n", map_type, al.level); 3150 3151 if (callchain_ret > 0) 3152 trace__fprintf_callchain(trace, sample); 3153 else if (callchain_ret < 0) 3154 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 3155 3156 ++trace->nr_events_printed; 3157 out: 3158 err = 0; 3159 out_put: 3160 thread__put(thread); 3161 addr_location__exit(&al); 3162 return err; 3163 } 3164 3165 static void trace__set_base_time(struct trace *trace, 3166 struct evsel *evsel, 3167 struct perf_sample *sample) 3168 { 3169 /* 3170 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust 3171 * and don't use sample->time unconditionally, we may end up having 3172 * some other event in the future without PERF_SAMPLE_TIME for good 3173 * reason, i.e. we may not be interested in its timestamps, just in 3174 * it taking place, picking some piece of information when it 3175 * appears in our event stream (vfs_getname comes to mind). 3176 */ 3177 if (trace->base_time == 0 && !trace->full_time && 3178 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME)) 3179 trace->base_time = sample->time; 3180 } 3181 3182 static int trace__process_sample(struct perf_tool *tool, 3183 union perf_event *event, 3184 struct perf_sample *sample, 3185 struct evsel *evsel, 3186 struct machine *machine __maybe_unused) 3187 { 3188 struct trace *trace = container_of(tool, struct trace, tool); 3189 struct thread *thread; 3190 int err = 0; 3191 3192 tracepoint_handler handler = evsel->handler; 3193 3194 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 3195 if (thread && thread__is_filtered(thread)) 3196 goto out; 3197 3198 trace__set_base_time(trace, evsel, sample); 3199 3200 if (handler) { 3201 ++trace->nr_events; 3202 handler(trace, evsel, event, sample); 3203 } 3204 out: 3205 thread__put(thread); 3206 return err; 3207 } 3208 3209 static int trace__record(struct trace *trace, int argc, const char **argv) 3210 { 3211 unsigned int rec_argc, i, j; 3212 const char **rec_argv; 3213 const char * const record_args[] = { 3214 "record", 3215 "-R", 3216 "-m", "1024", 3217 "-c", "1", 3218 }; 3219 pid_t pid = getpid(); 3220 char *filter = asprintf__tp_filter_pids(1, &pid); 3221 const char * const sc_args[] = { "-e", }; 3222 unsigned int sc_args_nr = ARRAY_SIZE(sc_args); 3223 const char * const majpf_args[] = { "-e", "major-faults" }; 3224 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args); 3225 const char * const minpf_args[] = { "-e", "minor-faults" }; 3226 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args); 3227 int err = -1; 3228 3229 /* +3 is for the event string below and the pid filter */ 3230 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 + 3231 majpf_args_nr + minpf_args_nr + argc; 3232 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 3233 3234 if (rec_argv == NULL || filter == NULL) 3235 goto out_free; 3236 3237 j = 0; 3238 for (i = 0; i < ARRAY_SIZE(record_args); i++) 3239 rec_argv[j++] = record_args[i]; 3240 3241 if (trace->trace_syscalls) { 3242 for (i = 0; i < sc_args_nr; i++) 3243 rec_argv[j++] = sc_args[i]; 3244 3245 
/* event string may be different for older kernels - e.g., RHEL6 */ 3246 if (is_valid_tracepoint("raw_syscalls:sys_enter")) 3247 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit"; 3248 else if (is_valid_tracepoint("syscalls:sys_enter")) 3249 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit"; 3250 else { 3251 pr_err("Neither raw_syscalls nor syscalls events exist.\n"); 3252 goto out_free; 3253 } 3254 } 3255 3256 rec_argv[j++] = "--filter"; 3257 rec_argv[j++] = filter; 3258 3259 if (trace->trace_pgfaults & TRACE_PFMAJ) 3260 for (i = 0; i < majpf_args_nr; i++) 3261 rec_argv[j++] = majpf_args[i]; 3262 3263 if (trace->trace_pgfaults & TRACE_PFMIN) 3264 for (i = 0; i < minpf_args_nr; i++) 3265 rec_argv[j++] = minpf_args[i]; 3266 3267 for (i = 0; i < (unsigned int)argc; i++) 3268 rec_argv[j++] = argv[i]; 3269 3270 err = cmd_record(j, rec_argv); 3271 out_free: 3272 free(filter); 3273 free(rec_argv); 3274 return err; 3275 } 3276 3277 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp); 3278 3279 static bool evlist__add_vfs_getname(struct evlist *evlist) 3280 { 3281 bool found = false; 3282 struct evsel *evsel, *tmp; 3283 struct parse_events_error err; 3284 int ret; 3285 3286 parse_events_error__init(&err); 3287 ret = parse_events(evlist, "probe:vfs_getname*", &err); 3288 parse_events_error__exit(&err); 3289 if (ret) 3290 return false; 3291 3292 evlist__for_each_entry_safe(evlist, evsel, tmp) { 3293 if (!strstarts(evsel__name(evsel), "probe:vfs_getname")) 3294 continue; 3295 3296 if (evsel__field(evsel, "pathname")) { 3297 evsel->handler = trace__vfs_getname; 3298 found = true; 3299 continue; 3300 } 3301 3302 list_del_init(&evsel->core.node); 3303 evsel->evlist = NULL; 3304 evsel__delete(evsel); 3305 } 3306 3307 return found; 3308 } 3309 3310 static struct evsel *evsel__new_pgfault(u64 config) 3311 { 3312 struct evsel *evsel; 3313 struct perf_event_attr attr = { 3314 .type = PERF_TYPE_SOFTWARE, 3315 .mmap_data = 1, 3316 }; 3317 3318 attr.config = config; 3319 attr.sample_period = 1; 3320 3321 event_attr_init(&attr); 3322 3323 evsel = evsel__new(&attr); 3324 if (evsel) 3325 evsel->handler = trace__pgfault; 3326 3327 return evsel; 3328 } 3329 3330 static void evlist__free_syscall_tp_fields(struct evlist *evlist) 3331 { 3332 struct evsel *evsel; 3333 3334 evlist__for_each_entry(evlist, evsel) { 3335 evsel_trace__delete(evsel->priv); 3336 evsel->priv = NULL; 3337 } 3338 } 3339 3340 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample) 3341 { 3342 const u32 type = event->header.type; 3343 struct evsel *evsel; 3344 3345 if (type != PERF_RECORD_SAMPLE) { 3346 trace__process_event(trace, trace->host, event, sample); 3347 return; 3348 } 3349 3350 evsel = evlist__id2evsel(trace->evlist, sample->id); 3351 if (evsel == NULL) { 3352 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id); 3353 return; 3354 } 3355 3356 if (evswitch__discard(&trace->evswitch, evsel)) 3357 return; 3358 3359 trace__set_base_time(trace, evsel, sample); 3360 3361 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && 3362 sample->raw_data == NULL) { 3363 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n", 3364 evsel__name(evsel), sample->tid, 3365 sample->cpu, sample->raw_size); 3366 } else { 3367 tracepoint_handler handler = evsel->handler; 3368 handler(trace, evsel, event, sample); 3369 } 3370 3371 if (trace->nr_events_printed >= trace->max_events && trace->max_events != 
ULONG_MAX) 3372 interrupted = true; 3373 } 3374 3375 static int trace__add_syscall_newtp(struct trace *trace) 3376 { 3377 int ret = -1; 3378 struct evlist *evlist = trace->evlist; 3379 struct evsel *sys_enter, *sys_exit; 3380 3381 sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter); 3382 if (sys_enter == NULL) 3383 goto out; 3384 3385 if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args)) 3386 goto out_delete_sys_enter; 3387 3388 sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit); 3389 if (sys_exit == NULL) 3390 goto out_delete_sys_enter; 3391 3392 if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret)) 3393 goto out_delete_sys_exit; 3394 3395 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param); 3396 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param); 3397 3398 evlist__add(evlist, sys_enter); 3399 evlist__add(evlist, sys_exit); 3400 3401 if (callchain_param.enabled && !trace->kernel_syscallchains) { 3402 /* 3403 * We're interested only in the user space callchain 3404 * leading to the syscall, allow overriding that for 3405 * debugging reasons using --kernel_syscall_callchains 3406 */ 3407 sys_exit->core.attr.exclude_callchain_kernel = 1; 3408 } 3409 3410 trace->syscalls.events.sys_enter = sys_enter; 3411 trace->syscalls.events.sys_exit = sys_exit; 3412 3413 ret = 0; 3414 out: 3415 return ret; 3416 3417 out_delete_sys_exit: 3418 evsel__delete_priv(sys_exit); 3419 out_delete_sys_enter: 3420 evsel__delete_priv(sys_enter); 3421 goto out; 3422 } 3423 3424 static int trace__set_ev_qualifier_tp_filter(struct trace *trace) 3425 { 3426 int err = -1; 3427 struct evsel *sys_exit; 3428 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier, 3429 trace->ev_qualifier_ids.nr, 3430 trace->ev_qualifier_ids.entries); 3431 3432 if (filter == NULL) 3433 goto out_enomem; 3434 3435 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) { 3436 sys_exit = trace->syscalls.events.sys_exit; 3437 err = evsel__append_tp_filter(sys_exit, filter); 3438 } 3439 3440 free(filter); 3441 out: 3442 return err; 3443 out_enomem: 3444 errno = ENOMEM; 3445 goto out; 3446 } 3447 3448 #ifdef HAVE_BPF_SKEL 3449 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name) 3450 { 3451 struct bpf_program *pos, *prog = NULL; 3452 const char *sec_name; 3453 3454 if (trace->skel->obj == NULL) 3455 return NULL; 3456 3457 bpf_object__for_each_program(pos, trace->skel->obj) { 3458 sec_name = bpf_program__section_name(pos); 3459 if (sec_name && !strcmp(sec_name, name)) { 3460 prog = pos; 3461 break; 3462 } 3463 } 3464 3465 return prog; 3466 } 3467 3468 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc, 3469 const char *prog_name, const char *type) 3470 { 3471 struct bpf_program *prog; 3472 3473 if (prog_name == NULL) { 3474 char default_prog_name[256]; 3475 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name); 3476 prog = trace__find_bpf_program_by_title(trace, default_prog_name); 3477 if (prog != NULL) 3478 goto out_found; 3479 if (sc->fmt && sc->fmt->alias) { 3480 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias); 3481 prog = trace__find_bpf_program_by_title(trace, default_prog_name); 3482 if (prog != NULL) 3483 goto out_found; 3484 } 3485 goto out_unaugmented; 3486 } 3487 3488 prog = trace__find_bpf_program_by_title(trace, prog_name); 3489 3490 if (prog != NULL) 
	{
out_found:
		return prog;
	}

	pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
		 prog_name, type, sc->name);
out_unaugmented:
	return trace->skel->progs.syscall_unaugmented;
}

static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);

	if (sc == NULL)
		return;

	sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
	sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit");
}

static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);
	return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
}

static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);
	return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
}

static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
{
	struct tep_format_field *field, *candidate_field;
	/*
	 * We're only interested in syscalls that have a pointer:
	 */
	for (field = sc->args; field; field = field->next) {
		if (field->flags & TEP_FIELD_IS_POINTER)
			goto try_to_find_pair;
	}

	return NULL;

try_to_find_pair:
	for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
		int id = syscalltbl__id_at_idx(trace->sctbl, i);
		struct syscall *pair = trace__syscall_info(trace, NULL, id);
		struct bpf_program *pair_prog;
		bool is_candidate = false;

		if (pair == NULL || pair == sc ||
		    pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented)
			continue;

		for (field = sc->args, candidate_field = pair->args;
		     field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
			bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
			     candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;

			if (is_pointer) {
				if (!candidate_is_pointer) {
					// The candidate doesn't copy our pointer arg, but it might copy other pointers we want.
					continue;
				}
			} else {
				if (candidate_is_pointer) {
					// The candidate might copy a pointer we don't have, skip it.
					goto next_candidate;
				}
				continue;
			}

			if (strcmp(field->type, candidate_field->type))
				goto next_candidate;

			/*
			 * This is limited in the BPF program but sys_write
			 * uses "const char *" for its "buf" arg so we need to
			 * use some heuristic that is kinda future proof...
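			 *
			 * I.e. accept "const char *" args only when the field
			 * name suggests a pathname-like string: e.g. openat's
			 * "filename" passes the test below, while write's
			 * "buf" does not.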
			 */
			if (strcmp(field->type, "const char *") == 0 &&
			    !(strstr(field->name, "name") ||
			      strstr(field->name, "path") ||
			      strstr(field->name, "file") ||
			      strstr(field->name, "root") ||
			      strstr(field->name, "description")))
				goto next_candidate;

			is_candidate = true;
		}

		if (!is_candidate)
			goto next_candidate;

		/*
		 * Check if the tentative pair syscall augmenter has more pointers, if it has,
		 * then it may be collecting that and we then can't use it, as it would collect
		 * more than what is common to the two syscalls.
		 */
		if (candidate_field) {
			for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
				if (candidate_field->flags & TEP_FIELD_IS_POINTER)
					goto next_candidate;
		}

		pair_prog = pair->bpf_prog.sys_enter;
		/*
		 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
		 * have been searched for, so search it here and if it returns the
		 * unaugmented one, then ignore it, otherwise we'll reuse that BPF
		 * program for a filtered syscall on a non-filtered one.
		 *
		 * For instance, we have "!syscalls:sys_enter_renameat" and that is
		 * useful for "renameat2".
		 */
		if (pair_prog == NULL) {
			pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
			if (pair_prog == trace->skel->progs.syscall_unaugmented)
				goto next_candidate;
		}

		pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
		return pair_prog;
next_candidate:
		continue;
	}

	return NULL;
}

static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
{
	int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
	int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
	int err = 0;

	for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
		int prog_fd, key = syscalltbl__id_at_idx(trace->sctbl, i);

		if (!trace__syscall_enabled(trace, key))
			continue;

		trace__init_syscall_bpf_progs(trace, key);

		// It'll get at least the "!raw_syscalls:unaugmented"
		prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
		prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
		err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
	}

	/*
	 * Now let's do a second pass looking for enabled syscalls without
	 * an augmenter that have a signature that is a superset of another
	 * syscall with an augmenter so that we can auto-reuse it.
	 *
	 * I.e. if we have an augmenter for the "open" syscall that has
	 * this signature:
	 *
	 *   int open(const char *pathname, int flags, mode_t mode);
	 *
	 * i.e. one
that will collect just the first string argument, then we 3660 * can reuse it for the 'creat' syscall, that has this signature: 3661 * 3662 * int creat(const char *pathname, mode_t mode); 3663 * 3664 * and for: 3665 * 3666 * int stat(const char *pathname, struct stat *statbuf); 3667 * int lstat(const char *pathname, struct stat *statbuf); 3668 * 3669 * Because the 'open' augmenter will collect the first arg as a string, 3670 * and leave alone all the other args, which already helps with 3671 * beautifying 'stat' and 'lstat''s pathname arg. 3672 * 3673 * Then, in time, when 'stat' gets an augmenter that collects both 3674 * first and second args (this one on the raw_syscalls:sys_exit prog 3675 * array tail call), then that one will be used. 3676 */ 3677 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) { 3678 int key = syscalltbl__id_at_idx(trace->sctbl, i); 3679 struct syscall *sc = trace__syscall_info(trace, NULL, key); 3680 struct bpf_program *pair_prog; 3681 int prog_fd; 3682 3683 if (sc == NULL || sc->bpf_prog.sys_enter == NULL) 3684 continue; 3685
3686 /* 3687 * For now we're just reusing the sys_enter prog, and if it 3688 * already has an augmenter, we don't need to find one. 3689 */ 3690 if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented) 3691 continue; 3692
3693 /* 3694 * Look at all the other syscalls for one that has a signature 3695 * that is close enough that we can share: 3696 */ 3697 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc); 3698 if (pair_prog == NULL) 3699 continue; 3700 3701 sc->bpf_prog.sys_enter = pair_prog; 3702
3703 /* 3704 * Update the BPF_MAP_TYPE_PROG_ARRAY for raw_syscalls:sys_enter 3705 * with the fd for the program we're reusing: 3706 */ 3707 prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter); 3708 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY); 3709 if (err) 3710 break; 3711 } 3712 3713 return err; 3714 } 3715 #endif // HAVE_BPF_SKEL 3716
3717 static int trace__set_ev_qualifier_filter(struct trace *trace) 3718 { 3719 if (trace->syscalls.events.sys_enter) 3720 return trace__set_ev_qualifier_tp_filter(trace); 3721 return 0; 3722 } 3723
3724 static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused, 3725 size_t npids __maybe_unused, pid_t *pids __maybe_unused) 3726 { 3727 int err = 0; 3728 #ifdef HAVE_LIBBPF_SUPPORT 3729 bool value = true; 3730 int map_fd = bpf_map__fd(map); 3731 size_t i; 3732 3733 for (i = 0; i < npids; ++i) { 3734 err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY); 3735 if (err) 3736 break; 3737 } 3738 #endif 3739 return err; 3740 } 3741
3742 static int trace__set_filter_loop_pids(struct trace *trace) 3743 { 3744 unsigned int nr = 1, err; 3745 pid_t pids[32] = { 3746 getpid(), 3747 }; 3748 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]); 3749 3750 while (thread && nr < ARRAY_SIZE(pids)) { 3751 struct thread *parent = machine__find_thread(trace->host, 3752 thread__ppid(thread), 3753 thread__ppid(thread)); 3754 3755 if (parent == NULL) 3756 break; 3757 3758 if (!strcmp(thread__comm_str(parent), "sshd") || 3759 strstarts(thread__comm_str(parent), "gnome-terminal")) { 3760 pids[nr++] = thread__tid(parent); 3761 break; 3762 } 3763 thread = parent; 3764 } 3765 3766 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids); 3767 if (!err && trace->filter_pids.map) 3768 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids); 3769 3770 return err; 3771 } 3772
3773 static int trace__set_filter_pids(struct trace *trace) 3774 { 3775 int
err = 0; 3776 /* 3777 * Better not use !target__has_task() here because we need to cover the 3778 * case where no threads were specified in the command line, but a 3779 * workload was, and in that case we will fill in the thread_map when 3780 * we fork the workload in evlist__prepare_workload. 3781 */ 3782 if (trace->filter_pids.nr > 0) { 3783 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr, 3784 trace->filter_pids.entries); 3785 if (!err && trace->filter_pids.map) { 3786 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr, 3787 trace->filter_pids.entries); 3788 } 3789 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) { 3790 err = trace__set_filter_loop_pids(trace); 3791 } 3792 3793 return err; 3794 } 3795
3796 static int __trace__deliver_event(struct trace *trace, union perf_event *event) 3797 { 3798 struct evlist *evlist = trace->evlist; 3799 struct perf_sample sample; 3800 int err = evlist__parse_sample(evlist, event, &sample); 3801 3802 if (err) 3803 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err); 3804 else 3805 trace__handle_event(trace, event, &sample); 3806 3807 return 0; 3808 } 3809
3810 static int __trace__flush_events(struct trace *trace) 3811 { 3812 u64 first = ordered_events__first_time(&trace->oe.data); 3813 u64 flush = trace->oe.last - NSEC_PER_SEC; 3814 3815 /* Is there something to flush? */ 3816 if (first && first < flush) 3817 return ordered_events__flush_time(&trace->oe.data, flush); 3818 3819 return 0; 3820 } 3821
3822 static int trace__flush_events(struct trace *trace) 3823 { 3824 return !trace->sort_events ? 0 : __trace__flush_events(trace); 3825 } 3826
3827 static int trace__deliver_event(struct trace *trace, union perf_event *event) 3828 { 3829 int err; 3830 3831 if (!trace->sort_events) 3832 return __trace__deliver_event(trace, event); 3833 3834 err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last); 3835 if (err && err != -1) 3836 return err; 3837 3838 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL); 3839 if (err) 3840 return err; 3841 3842 return trace__flush_events(trace); 3843 } 3844
3845 static int ordered_events__deliver_event(struct ordered_events *oe, 3846 struct ordered_event *event) 3847 { 3848 struct trace *trace = container_of(oe, struct trace, oe.data); 3849 3850 return __trace__deliver_event(trace, event->event); 3851 } 3852
3853 static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg, 3854 char **type) 3855 { 3856 struct tep_format_field *field; 3857 struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel); 3858 3859 if (evsel->tp_format == NULL || fmt == NULL) 3860 return NULL; 3861 3862 for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt) 3863 if (strcmp(field->name, arg) == 0) { 3864 *type = field->type; 3865 return fmt; 3866 } 3867 3868 return NULL; 3869 } 3870
3871 static int trace__expand_filter(struct trace *trace, struct evsel *evsel) 3872 { 3873 char *tok, *left = evsel->filter, *new_filter = evsel->filter; 3874 3875 while ((tok = strpbrk(left, "=<>!")) != NULL) { 3876 char *right = tok + 1, *right_end; 3877 3878 if (*right == '=') 3879 ++right; 3880 3881 while (isspace(*right)) 3882 ++right; 3883 3884 if (*right == '\0') 3885 break; 3886 3887 while (!isalpha(*left)) 3888 if (++left == tok) { 3889 /* 3890 * Bail out: we can't find the name of the argument that is being 3891 * used in the filter; let it try to set this
filter, it will fail later. 3892 */ 3893 return 0; 3894 } 3895 3896 right_end = right + 1; 3897 while (isalnum(*right_end) || *right_end == '_' || *right_end == '|') 3898 ++right_end; 3899 3900 if (isalpha(*right)) { 3901 struct syscall_arg_fmt *fmt; 3902 int left_size = tok - left, 3903 right_size = right_end - right; 3904 char arg[128], *type; 3905 3906 while (isspace(left[left_size - 1])) 3907 --left_size; 3908 3909 scnprintf(arg, sizeof(arg), "%.*s", left_size, left); 3910 3911 fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg, &type); 3912 if (fmt == NULL) { 3913 pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n", 3914 arg, evsel->name, evsel->filter); 3915 return -1; 3916 } 3917 3918 pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ", 3919 arg, (int)(right - tok), tok, right_size, right); 3920 3921 if (fmt->strtoul) { 3922 u64 val; 3923 struct syscall_arg syscall_arg = { 3924 .trace = trace, 3925 .fmt = fmt, 3926 .type_name = type, 3927 .parm = fmt->parm, 3928 }; 3929 3930 if (fmt->strtoul(right, right_size, &syscall_arg, &val)) { 3931 char *n, expansion[19]; 3932 int expansion_length = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val); 3933 int expansion_offset = right - new_filter; 3934 3935 pr_debug("%s", expansion); 3936 3937 if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) { 3938 pr_debug(" out of memory!\n"); 3939 free(new_filter); 3940 return -1; 3941 } 3942 if (new_filter != evsel->filter) 3943 free(new_filter); 3944 left = n + expansion_offset + expansion_length; 3945 new_filter = n; 3946 } else { 3947 pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n", 3948 right_size, right, arg, evsel->name, evsel->filter); 3949 return -1; 3950 } 3951 } else { 3952 pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n", 3953 arg, evsel->name, evsel->filter); 3954 return -1; 3955 } 3956 3957 pr_debug("\n"); 3958 } else { 3959 left = right_end; 3960 } 3961 } 3962 3963 if (new_filter != evsel->filter) { 3964 pr_debug("New filter for %s: %s\n", evsel->name, new_filter); 3965 evsel__set_filter(evsel, new_filter); 3966 free(new_filter); 3967 } 3968 3969 return 0; 3970 } 3971
3972 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel) 3973 { 3974 struct evlist *evlist = trace->evlist; 3975 struct evsel *evsel; 3976 3977 evlist__for_each_entry(evlist, evsel) { 3978 if (evsel->filter == NULL) 3979 continue; 3980 3981 if (trace__expand_filter(trace, evsel)) { 3982 *err_evsel = evsel; 3983 return -1; 3984 } 3985 } 3986 3987 return 0; 3988 } 3989
3990 static int trace__run(struct trace *trace, int argc, const char **argv) 3991 { 3992 struct evlist *evlist = trace->evlist; 3993 struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL; 3994 int err = -1, i; 3995 unsigned long before; 3996 const bool forks = argc > 0; 3997 bool draining = false; 3998 3999 trace->live = true; 4000 4001 if (!trace->raw_augmented_syscalls) { 4002 if (trace->trace_syscalls && trace__add_syscall_newtp(trace)) 4003 goto out_error_raw_syscalls; 4004 4005 if (trace->trace_syscalls) 4006 trace->vfs_getname = evlist__add_vfs_getname(evlist); 4007 } 4008 4009 if ((trace->trace_pgfaults & TRACE_PFMAJ)) { 4010 pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ); 4011 if (pgfault_maj == NULL) 4012 goto out_error_mem; 4013 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param); 4014 evlist__add(evlist, pgfault_maj); 4015 } 4016 4017 if ((trace->trace_pgfaults &
TRACE_PFMIN)) { 4018 pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN); 4019 if (pgfault_min == NULL) 4020 goto out_error_mem; 4021 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param); 4022 evlist__add(evlist, pgfault_min); 4023 } 4024
4025 /* Enable ignoring missing threads when -u/-p option is defined. */ 4026 trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid; 4027 4028 if (trace->sched && 4029 evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime)) 4030 goto out_error_sched_stat_runtime; 4031
/* 4032 * If a global cgroup was set, apply it to all the events without an 4033 * explicit cgroup. I.e.: 4034 * 4035 * trace -G A -e sched:*switch 4036 * 4037 * will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc 4038 * _and_ sched:sched_switch to the 'A' cgroup, while: 4039 * 4040 * trace -e sched:*switch -G A 4041 * 4042 * will only set the sched:sched_switch event to the 'A' cgroup, all the 4043 * other events (raw_syscalls:sys_{enter,exit}, etc) are left "without" 4044 * a cgroup (on the root cgroup, sys wide, etc). 4045 * 4046 * Multiple cgroups: 4047 * 4048 * trace -G A -e sched:*switch -G B 4049 * 4050 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes 4051 * to the 'B' cgroup. 4052 * 4053 * evlist__set_default_cgroup() grabs a reference of the passed cgroup 4054 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL. 4055 */ 4056 if (trace->cgroup) 4057 evlist__set_default_cgroup(trace->evlist, trace->cgroup); 4058
4059 err = evlist__create_maps(evlist, &trace->opts.target); 4060 if (err < 0) { 4061 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n"); 4062 goto out_delete_evlist; 4063 } 4064 4065 err = trace__symbols_init(trace, evlist); 4066 if (err < 0) { 4067 fprintf(trace->output, "Problems initializing symbol libraries!\n"); 4068 goto out_delete_evlist; 4069 } 4070 4071 evlist__config(evlist, &trace->opts, &callchain_param); 4072 4073 if (forks) { 4074 err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL); 4075 if (err < 0) { 4076 fprintf(trace->output, "Couldn't run the workload!\n"); 4077 goto out_delete_evlist; 4078 } 4079 workload_pid = evlist->workload.pid; 4080 } 4081 4082 err = evlist__open(evlist); 4083 if (err < 0) 4084 goto out_error_open; 4085 #ifdef HAVE_BPF_SKEL 4086 if (trace->syscalls.events.bpf_output) { 4087 struct perf_cpu cpu; 4088 4089 /* 4090 * Set up the __augmented_syscalls__ BPF map to hold for each 4091 * CPU the bpf-output event's file descriptor.
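 * Roughly, the loop below does:
 *
 *	__augmented_syscalls__[cpu] = fd of the bpf-output event on that cpu;
 *
 * so the BPF programs can bpf_perf_event_output() into the ring buffer
 * of whatever CPU they happen to run on.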
4092 */ 4093 perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) { 4094 bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__, 4095 &cpu.cpu, sizeof(int), 4096 xyarray__entry(trace->syscalls.events.bpf_output->core.fd, 4097 cpu.cpu, 0), 4098 sizeof(__u32), BPF_ANY); 4099 } 4100 } 4101 #endif
4102 err = trace__set_filter_pids(trace); 4103 if (err < 0) 4104 goto out_error_mem; 4105 4106 #ifdef HAVE_BPF_SKEL 4107 if (trace->skel && trace->skel->progs.sys_enter) 4108 trace__init_syscalls_bpf_prog_array_maps(trace); 4109 #endif 4110 4111 if (trace->ev_qualifier_ids.nr > 0) { 4112 err = trace__set_ev_qualifier_filter(trace); 4113 if (err < 0) 4114 goto out_errno; 4115 4116 if (trace->syscalls.events.sys_exit) { 4117 pr_debug("event qualifier tracepoint filter: %s\n", 4118 trace->syscalls.events.sys_exit->filter); 4119 } 4120 } 4121
4122 /* 4123 * If the "close" syscall is not traced, then we will not have the 4124 * opportunity to, in syscall_arg__scnprintf_close_fd(), invalidate the 4125 * fd->pathname table, and we'd end up showing the last value set by 4126 * syscalls opening a pathname and associating it with a descriptor, or 4127 * reading it from /proc/pid/fd/ in cases where that doesn't make 4128 * sense. 4129 * 4130 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is 4131 * not in use. 4132 */ 4133 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close")); 4134
4135 err = trace__expand_filters(trace, &evsel); 4136 if (err) 4137 goto out_delete_evlist; 4138 err = evlist__apply_filters(evlist, &evsel); 4139 if (err < 0) 4140 goto out_error_apply_filters; 4141 4142 err = evlist__mmap(evlist, trace->opts.mmap_pages); 4143 if (err < 0) 4144 goto out_error_mmap; 4145 4146 if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay) 4147 evlist__enable(evlist); 4148 4149 if (forks) 4150 evlist__start_workload(evlist); 4151 4152 if (trace->opts.target.initial_delay) { 4153 usleep(trace->opts.target.initial_delay * 1000); 4154 evlist__enable(evlist); 4155 } 4156 4157 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 || 4158 perf_thread_map__nr(evlist->core.threads) > 1 || 4159 evlist__first(evlist)->core.attr.inherit; 4160
4161 /* 4162 * Now that we already used evsel->core.attr to ask the kernel to setup the 4163 * events, let's reuse evsel->core.attr.sample_max_stack as the limit in 4164 * trace__resolve_callchain(), allowing per-event max-stack settings 4165 * to override an explicitly set --max-stack global setting. 4166 */ 4167 evlist__for_each_entry(evlist, evsel) { 4168 if (evsel__has_callchain(evsel) && 4169 evsel->core.attr.sample_max_stack == 0) 4170 evsel->core.attr.sample_max_stack = trace->max_stack; 4171 } 4172 again: 4173 before = trace->nr_events; 4174 4175 for (i = 0; i < evlist->core.nr_mmaps; i++) { 4176 union perf_event *event; 4177 struct mmap *md; 4178 4179 md = &evlist->mmap[i]; 4180 if (perf_mmap__read_init(&md->core) < 0) 4181 continue; 4182 4183 while ((event = perf_mmap__read_event(&md->core)) != NULL) { 4184 ++trace->nr_events; 4185 4186 err = trace__deliver_event(trace, event); 4187 if (err) 4188 goto out_disable; 4189 4190 perf_mmap__consume(&md->core); 4191 4192 if (interrupted) 4193 goto out_disable; 4194 4195 if (done && !draining) { 4196 evlist__disable(evlist); 4197 draining = true; 4198 } 4199 } 4200 perf_mmap__read_done(&md->core); 4201 } 4202 4203 if (trace->nr_events == before) { 4204 int timeout = done ?
100 : -1; 4205 4206 if (!draining && evlist__poll(evlist, timeout) > 0) { 4207 if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0) 4208 draining = true; 4209 4210 goto again; 4211 } else { 4212 if (trace__flush_events(trace)) 4213 goto out_disable; 4214 } 4215 } else { 4216 goto again; 4217 } 4218 4219 out_disable: 4220 thread__zput(trace->current); 4221 4222 evlist__disable(evlist); 4223 4224 if (trace->sort_events) 4225 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL); 4226 4227 if (!err) { 4228 if (trace->summary) 4229 trace__fprintf_thread_summary(trace, trace->output); 4230 4231 if (trace->show_tool_stats) { 4232 fprintf(trace->output, "Stats:\n " 4233 " vfs_getname : %" PRIu64 "\n" 4234 " proc_getname: %" PRIu64 "\n", 4235 trace->stats.vfs_getname, 4236 trace->stats.proc_getname); 4237 } 4238 } 4239 4240 out_delete_evlist: 4241 trace__symbols__exit(trace); 4242 evlist__free_syscall_tp_fields(evlist); 4243 evlist__delete(evlist); 4244 cgroup__put(trace->cgroup); 4245 trace->evlist = NULL; 4246 trace->live = false; 4247 return err; 4248 { 4249 char errbuf[BUFSIZ]; 4250 4251 out_error_sched_stat_runtime: 4252 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime"); 4253 goto out_error; 4254 4255 out_error_raw_syscalls: 4256 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)"); 4257 goto out_error; 4258 4259 out_error_mmap: 4260 evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf)); 4261 goto out_error; 4262 4263 out_error_open: 4264 evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf)); 4265 4266 out_error: 4267 fprintf(trace->output, "%s\n", errbuf); 4268 goto out_delete_evlist; 4269 4270 out_error_apply_filters: 4271 fprintf(trace->output, 4272 "Failed to set filter \"%s\" on event %s with %d (%s)\n", 4273 evsel->filter, evsel__name(evsel), errno, 4274 str_error_r(errno, errbuf, sizeof(errbuf))); 4275 goto out_delete_evlist; 4276 } 4277 out_error_mem: 4278 fprintf(trace->output, "Not enough memory to run!\n"); 4279 goto out_delete_evlist; 4280 4281 out_errno: 4282 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno)); 4283 goto out_delete_evlist; 4284 } 4285 4286 static int trace__replay(struct trace *trace) 4287 { 4288 const struct evsel_str_handler handlers[] = { 4289 { "probe:vfs_getname", trace__vfs_getname, }, 4290 }; 4291 struct perf_data data = { 4292 .path = input_name, 4293 .mode = PERF_DATA_MODE_READ, 4294 .force = trace->force, 4295 }; 4296 struct perf_session *session; 4297 struct evsel *evsel; 4298 int err = -1; 4299 4300 trace->tool.sample = trace__process_sample; 4301 trace->tool.mmap = perf_event__process_mmap; 4302 trace->tool.mmap2 = perf_event__process_mmap2; 4303 trace->tool.comm = perf_event__process_comm; 4304 trace->tool.exit = perf_event__process_exit; 4305 trace->tool.fork = perf_event__process_fork; 4306 trace->tool.attr = perf_event__process_attr; 4307 trace->tool.tracing_data = perf_event__process_tracing_data; 4308 trace->tool.build_id = perf_event__process_build_id; 4309 trace->tool.namespaces = perf_event__process_namespaces; 4310 4311 trace->tool.ordered_events = true; 4312 trace->tool.ordering_requires_timestamps = true; 4313 4314 /* add tid to output */ 4315 trace->multiple_threads = true; 4316 4317 session = perf_session__new(&data, &trace->tool); 4318 if (IS_ERR(session)) 4319 return PTR_ERR(session); 4320 4321 if (trace->opts.target.pid) 4322 symbol_conf.pid_list_str = strdup(trace->opts.target.pid); 4323 4324 if 
(trace->opts.target.tid) 4325 symbol_conf.tid_list_str = strdup(trace->opts.target.tid); 4326 4327 if (symbol__init(&session->header.env) < 0) 4328 goto out; 4329 4330 trace->host = &session->machines.host; 4331 4332 err = perf_session__set_tracepoints_handlers(session, handlers); 4333 if (err) 4334 goto out; 4335
4336 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter"); 4337 trace->syscalls.events.sys_enter = evsel; 4338 /* older kernels have syscalls tp versus raw_syscalls */ 4339 if (evsel == NULL) 4340 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter"); 4341 4342 if (evsel && 4343 (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 || 4344 perf_evsel__init_sc_tp_ptr_field(evsel, args))) { 4345 pr_err("Error initializing the raw_syscalls:sys_enter event\n"); 4346 goto out; 4347 } 4348
4349 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit"); 4350 trace->syscalls.events.sys_exit = evsel; 4351 if (evsel == NULL) 4352 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit"); 4353 if (evsel && 4354 (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 || 4355 perf_evsel__init_sc_tp_uint_field(evsel, ret))) { 4356 pr_err("Error initializing the raw_syscalls:sys_exit event\n"); 4357 goto out; 4358 } 4359
4360 evlist__for_each_entry(session->evlist, evsel) { 4361 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE && 4362 (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ || 4363 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN || 4364 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS)) 4365 evsel->handler = trace__pgfault; 4366 } 4367 4368 setup_pager(); 4369 4370 err = perf_session__process_events(session); 4371 if (err) 4372 pr_err("Failed to process events, error %d\n", err); 4373 4374 else if (trace->summary) 4375 trace__fprintf_thread_summary(trace, trace->output); 4376 4377 out: 4378 perf_session__delete(session); 4379 4380 return err; 4381 } 4382
4383 static size_t trace__fprintf_threads_header(FILE *fp) 4384 { 4385 size_t printed; 4386 4387 printed = fprintf(fp, "\n Summary of events:\n\n"); 4388 4389 return printed; 4390 } 4391
4392 DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs, 4393 struct syscall_stats *stats; 4394 double msecs; 4395 int syscall; 4396 ) 4397 { 4398 struct int_node *source = rb_entry(nd, struct int_node, rb_node); 4399 struct syscall_stats *stats = source->priv; 4400 4401 entry->syscall = source->i; 4402 entry->stats = stats; 4403 entry->msecs = stats ?
(u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0; 4404 } 4405
4406 static size_t thread__dump_stats(struct thread_trace *ttrace, 4407 struct trace *trace, FILE *fp) 4408 { 4409 size_t printed = 0; 4410 struct syscall *sc; 4411 struct rb_node *nd; 4412 DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats); 4413 4414 if (syscall_stats == NULL) 4415 return 0; 4416 4417 printed += fprintf(fp, "\n"); 4418 4419 printed += fprintf(fp, " syscall calls errors total min avg max stddev\n"); 4420 printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n"); 4421 printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n"); 4422 4423 resort_rb__for_each_entry(nd, syscall_stats) { 4424 struct syscall_stats *stats = syscall_stats_entry->stats; 4425 if (stats) { 4426 double min = (double)(stats->stats.min) / NSEC_PER_MSEC; 4427 double max = (double)(stats->stats.max) / NSEC_PER_MSEC; 4428 double avg = avg_stats(&stats->stats); 4429 double pct; 4430 u64 n = (u64)stats->stats.n; 4431 4432 pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0; 4433 avg /= NSEC_PER_MSEC; 4434 4435 sc = &trace->syscalls.table[syscall_stats_entry->syscall]; 4436 printed += fprintf(fp, " %-15s", sc->name); 4437 printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f", 4438 n, stats->nr_failures, syscall_stats_entry->msecs, min, avg); 4439 printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct); 4440 4441 if (trace->errno_summary && stats->nr_failures) { 4442 int e; 4443 4444 for (e = 0; e < stats->max_errno; ++e) { 4445 if (stats->errnos[e] != 0) 4446 fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]); 4447 } 4448 } 4449 } 4450 } 4451 4452 resort_rb__delete(syscall_stats); 4453 printed += fprintf(fp, "\n\n"); 4454 4455 return printed; 4456 } 4457
4458 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace) 4459 { 4460 size_t printed = 0; 4461 struct thread_trace *ttrace = thread__priv(thread); 4462 double ratio; 4463 4464 if (ttrace == NULL) 4465 return 0; 4466 4467 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0; 4468 4469 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread__tid(thread)); 4470 printed += fprintf(fp, "%lu events, ", ttrace->nr_events); 4471 printed += fprintf(fp, "%.1f%%", ratio); 4472 if (ttrace->pfmaj) 4473 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj); 4474 if (ttrace->pfmin) 4475 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin); 4476 if (trace->sched) 4477 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms); 4478 else if (fputc('\n', fp) != EOF) 4479 ++printed; 4480 4481 printed += thread__dump_stats(ttrace, trace, fp); 4482 4483 return printed; 4484 } 4485
4486 static unsigned long thread__nr_events(struct thread_trace *ttrace) 4487 { 4488 return ttrace ? ttrace->nr_events : 0; 4489 } 4490
4491 static int trace_nr_events_cmp(void *priv __maybe_unused, 4492 const struct list_head *la, 4493 const struct list_head *lb) 4494 { 4495 struct thread_list *a = list_entry(la, struct thread_list, list); 4496 struct thread_list *b = list_entry(lb, struct thread_list, list); 4497 unsigned long a_nr_events = thread__nr_events(thread__priv(a->thread)); 4498 unsigned long b_nr_events = thread__nr_events(thread__priv(b->thread)); 4499 4500 if (a_nr_events != b_nr_events) 4501 return a_nr_events < b_nr_events ? -1 : 1; 4502 4503 /* Identical number of events, place smaller tids first.
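E.g. (events, tid) pairs (9, 21), (9, 17) and (5, 40) come out ordered (5, 40), (9, 17), (9, 21): ascending event count, ties broken by tid.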
*/ 4504 return thread__tid(a->thread) < thread__tid(b->thread) 4505 ? -1 4506 : (thread__tid(a->thread) > thread__tid(b->thread) ? 1 : 0); 4507 } 4508
4509 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp) 4510 { 4511 size_t printed = trace__fprintf_threads_header(fp); 4512 LIST_HEAD(threads); 4513 4514 if (machine__thread_list(trace->host, &threads) == 0) { 4515 struct thread_list *pos; 4516 4517 list_sort(NULL, &threads, trace_nr_events_cmp); 4518 4519 list_for_each_entry(pos, &threads, list) 4520 printed += trace__fprintf_thread(fp, pos->thread, trace); 4521 } 4522 thread_list__delete(&threads); 4523 return printed; 4524 } 4525
4526 static int trace__set_duration(const struct option *opt, const char *str, 4527 int unset __maybe_unused) 4528 { 4529 struct trace *trace = opt->value; 4530 4531 trace->duration_filter = atof(str); 4532 return 0; 4533 } 4534
4535 static int trace__set_filter_pids_from_option(const struct option *opt, const char *str, 4536 int unset __maybe_unused) 4537 { 4538 int ret = -1; 4539 size_t i; 4540 struct trace *trace = opt->value; 4541 /* 4542 * FIXME: introduce an intarray class, plainly parse the CSV and create a 4543 * { int nr, int entries[] } struct... 4544 */ 4545 struct intlist *list = intlist__new(str); 4546 4547 if (list == NULL) 4548 return -1; 4549 4550 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1; 4551 trace->filter_pids.entries = calloc(i, sizeof(pid_t)); 4552 4553 if (trace->filter_pids.entries == NULL) 4554 goto out; 4555 4556 trace->filter_pids.entries[0] = getpid(); 4557 4558 for (i = 1; i < trace->filter_pids.nr; ++i) 4559 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i; 4560 4561 intlist__delete(list); 4562 ret = 0; 4563 out: 4564 return ret; 4565 } 4566
4567 static int trace__open_output(struct trace *trace, const char *filename) 4568 { 4569 struct stat st; 4570 4571 if (!stat(filename, &st) && st.st_size) { 4572 char oldname[PATH_MAX]; 4573 4574 scnprintf(oldname, sizeof(oldname), "%s.old", filename); 4575 unlink(oldname); 4576 rename(filename, oldname); 4577 } 4578 4579 trace->output = fopen(filename, "w"); 4580 4581 return trace->output == NULL ?
-errno : 0; 4582 } 4583
4584 static int parse_pagefaults(const struct option *opt, const char *str, 4585 int unset __maybe_unused) 4586 { 4587 int *trace_pgfaults = opt->value; 4588 4589 if (strcmp(str, "all") == 0) 4590 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN; 4591 else if (strcmp(str, "maj") == 0) 4592 *trace_pgfaults |= TRACE_PFMAJ; 4593 else if (strcmp(str, "min") == 0) 4594 *trace_pgfaults |= TRACE_PFMIN; 4595 else 4596 return -1; 4597 4598 return 0; 4599 } 4600
4601 static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler) 4602 { 4603 struct evsel *evsel; 4604 4605 evlist__for_each_entry(evlist, evsel) { 4606 if (evsel->handler == NULL) 4607 evsel->handler = handler; 4608 } 4609 } 4610
4611 static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name) 4612 { 4613 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel); 4614 4615 if (fmt) { 4616 const struct syscall_fmt *scfmt = syscall_fmt__find(name); 4617 4618 if (scfmt) { 4619 int skip = 0; 4620 4621 if (strcmp(evsel->tp_format->format.fields->name, "__syscall_nr") == 0 || 4622 strcmp(evsel->tp_format->format.fields->name, "nr") == 0) 4623 ++skip; 4624 4625 memcpy(fmt + skip, scfmt->arg, (evsel->tp_format->format.nr_fields - skip) * sizeof(*fmt)); 4626 } 4627 } 4628 } 4629
4630 static int evlist__set_syscall_tp_fields(struct evlist *evlist, bool *use_btf) 4631 { 4632 struct evsel *evsel; 4633 4634 evlist__for_each_entry(evlist, evsel) { 4635 if (evsel->priv || !evsel->tp_format) 4636 continue; 4637 4638 if (strcmp(evsel->tp_format->system, "syscalls")) { 4639 evsel__init_tp_arg_scnprintf(evsel, use_btf); 4640 continue; 4641 } 4642 4643 if (evsel__init_syscall_tp(evsel)) 4644 return -1; 4645 4646 if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) { 4647 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 4648 4649 if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64))) 4650 return -1; 4651 4652 evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_enter_") - 1); 4653 } else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) { 4654 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 4655 4656 if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap)) 4657 return -1; 4658 4659 evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_exit_") - 1); 4660 } 4661 } 4662 4663 return 0; 4664 } 4665
4666 /* 4667 * XXX: Hackish, just splitting the combined -e+--event (syscalls 4668 * (raw_syscalls:sys_{enter,exit}) + events (tracepoints, HW, SW, etc)) to use 4669 * the existing facilities unchanged (trace->ev_qualifier + parse_options()). 4670 * 4671 * It'd be better to introduce a parse_options() variant that would return a 4672 * list with the terms it didn't match to an event...
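 * E.g. "perf trace -e open*,sched:sched_switch" is split below into
 * "open*", which lands in the syscall qualifier list (lists[1]), and
 * "sched:sched_switch", which is handed to parse_events_option()
 * (lists[0]).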
4673 */ 4674 static int trace__parse_events_option(const struct option *opt, const char *str, 4675 int unset __maybe_unused) 4676 { 4677 struct trace *trace = (struct trace *)opt->value; 4678 const char *s = str; 4679 char *sep = NULL, *lists[2] = { NULL, NULL, }; 4680 int len = strlen(str) + 1, err = -1, list, idx; 4681 char *strace_groups_dir = system_path(STRACE_GROUPS_DIR); 4682 char group_name[PATH_MAX]; 4683 const struct syscall_fmt *fmt; 4684 4685 if (strace_groups_dir == NULL) 4686 return -1; 4687 4688 if (*s == '!') { 4689 ++s; 4690 trace->not_ev_qualifier = true; 4691 } 4692 4693 while (1) { 4694 if ((sep = strchr(s, ',')) != NULL) 4695 *sep = '\0'; 4696 4697 list = 0; 4698 if (syscalltbl__id(trace->sctbl, s) >= 0 || 4699 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) { 4700 list = 1; 4701 goto do_concat; 4702 } 4703 4704 fmt = syscall_fmt__find_by_alias(s); 4705 if (fmt != NULL) { 4706 list = 1; 4707 s = fmt->name; 4708 } else { 4709 path__join(group_name, sizeof(group_name), strace_groups_dir, s); 4710 if (access(group_name, R_OK) == 0) 4711 list = 1; 4712 } 4713 do_concat: 4714 if (lists[list]) { 4715 sprintf(lists[list] + strlen(lists[list]), ",%s", s); 4716 } else { 4717 lists[list] = malloc(len); 4718 if (lists[list] == NULL) 4719 goto out; 4720 strcpy(lists[list], s); 4721 } 4722 4723 if (!sep) 4724 break; 4725 4726 *sep = ','; 4727 s = sep + 1; 4728 } 4729 4730 if (lists[1] != NULL) { 4731 struct strlist_config slist_config = { 4732 .dirname = strace_groups_dir, 4733 }; 4734 4735 trace->ev_qualifier = strlist__new(lists[1], &slist_config); 4736 if (trace->ev_qualifier == NULL) { 4737 fputs("Not enough memory to parse event qualifier", trace->output); 4738 goto out; 4739 } 4740 4741 if (trace__validate_ev_qualifier(trace)) 4742 goto out; 4743 trace->trace_syscalls = true; 4744 } 4745 4746 err = 0; 4747 4748 if (lists[0]) { 4749 struct parse_events_option_args parse_events_option_args = { 4750 .evlistp = &trace->evlist, 4751 }; 4752 struct option o = { 4753 .value = &parse_events_option_args, 4754 }; 4755 err = parse_events_option(&o, lists[0], 0); 4756 } 4757 out: 4758 free(strace_groups_dir); 4759 free(lists[0]); 4760 free(lists[1]); 4761 if (sep) 4762 *sep = ','; 4763 4764 return err; 4765 } 4766 4767 static int trace__parse_cgroups(const struct option *opt, const char *str, int unset) 4768 { 4769 struct trace *trace = opt->value; 4770 4771 if (!list_empty(&trace->evlist->core.entries)) { 4772 struct option o = { 4773 .value = &trace->evlist, 4774 }; 4775 return parse_cgroups(&o, str, unset); 4776 } 4777 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str); 4778 4779 return 0; 4780 } 4781 4782 static int trace__config(const char *var, const char *value, void *arg) 4783 { 4784 struct trace *trace = arg; 4785 int err = 0; 4786 4787 if (!strcmp(var, "trace.add_events")) { 4788 trace->perfconfig_events = strdup(value); 4789 if (trace->perfconfig_events == NULL) { 4790 pr_err("Not enough memory for %s\n", "trace.add_events"); 4791 return -1; 4792 } 4793 } else if (!strcmp(var, "trace.show_timestamp")) { 4794 trace->show_tstamp = perf_config_bool(var, value); 4795 } else if (!strcmp(var, "trace.show_duration")) { 4796 trace->show_duration = perf_config_bool(var, value); 4797 } else if (!strcmp(var, "trace.show_arg_names")) { 4798 trace->show_arg_names = perf_config_bool(var, value); 4799 if (!trace->show_arg_names) 4800 trace->show_zeros = true; 4801 } else if (!strcmp(var, "trace.show_zeros")) { 4802 bool new_show_zeros = perf_config_bool(var, value); 4803 
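/* With arg names suppressed the values are printed positionally, so zeros can't also be omitted without making the positions ambiguous; hence show_zeros must stay on while show_arg_names=no: */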
if (!trace->show_arg_names && !new_show_zeros) { 4804 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n"); 4805 goto out; 4806 } 4807 trace->show_zeros = new_show_zeros; 4808 } else if (!strcmp(var, "trace.show_prefix")) { 4809 trace->show_string_prefix = perf_config_bool(var, value); 4810 } else if (!strcmp(var, "trace.no_inherit")) { 4811 trace->opts.no_inherit = perf_config_bool(var, value); 4812 } else if (!strcmp(var, "trace.args_alignment")) { 4813 int args_alignment = 0; 4814 if (perf_config_int(&args_alignment, var, value) == 0) 4815 trace->args_alignment = args_alignment; 4816 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) { 4817 if (strcasecmp(value, "libtraceevent") == 0) 4818 trace->libtraceevent_print = true; 4819 else if (strcasecmp(value, "libbeauty") == 0) 4820 trace->libtraceevent_print = false; 4821 } 4822 out: 4823 return err; 4824 } 4825 4826 static void trace__exit(struct trace *trace) 4827 { 4828 int i; 4829 4830 strlist__delete(trace->ev_qualifier); 4831 zfree(&trace->ev_qualifier_ids.entries); 4832 if (trace->syscalls.table) { 4833 for (i = 0; i <= trace->sctbl->syscalls.max_id; i++) 4834 syscall__exit(&trace->syscalls.table[i]); 4835 zfree(&trace->syscalls.table); 4836 } 4837 syscalltbl__delete(trace->sctbl); 4838 zfree(&trace->perfconfig_events); 4839 } 4840 4841 #ifdef HAVE_BPF_SKEL 4842 static int bpf__setup_bpf_output(struct evlist *evlist) 4843 { 4844 int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/"); 4845 4846 if (err) 4847 pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n"); 4848 4849 return err; 4850 } 4851 #endif 4852 4853 int cmd_trace(int argc, const char **argv) 4854 { 4855 const char *trace_usage[] = { 4856 "perf trace [<options>] [<command>]", 4857 "perf trace [<options>] -- <command> [<options>]", 4858 "perf trace record [<options>] [<command>]", 4859 "perf trace record [<options>] -- <command> [<options>]", 4860 NULL 4861 }; 4862 struct trace trace = { 4863 .opts = { 4864 .target = { 4865 .uid = UINT_MAX, 4866 .uses_mmap = true, 4867 }, 4868 .user_freq = UINT_MAX, 4869 .user_interval = ULLONG_MAX, 4870 .no_buffering = true, 4871 .mmap_pages = UINT_MAX, 4872 }, 4873 .output = stderr, 4874 .show_comm = true, 4875 .show_tstamp = true, 4876 .show_duration = true, 4877 .show_arg_names = true, 4878 .args_alignment = 70, 4879 .trace_syscalls = false, 4880 .kernel_syscallchains = false, 4881 .max_stack = UINT_MAX, 4882 .max_events = ULONG_MAX, 4883 }; 4884 const char *output_name = NULL; 4885 const struct option trace_options[] = { 4886 OPT_CALLBACK('e', "event", &trace, "event", 4887 "event/syscall selector. 
use 'perf list' to list available events", 4888 trace__parse_events_option), 4889 OPT_CALLBACK(0, "filter", &trace.evlist, "filter", 4890 "event filter", parse_filter), 4891 OPT_BOOLEAN(0, "comm", &trace.show_comm, 4892 "show the thread COMM next to its id"), 4893 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"), 4894 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace", 4895 trace__parse_events_option), 4896 OPT_STRING('o', "output", &output_name, "file", "output file name"), 4897 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"), 4898 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid", 4899 "trace events on existing process id"), 4900 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid", 4901 "trace events on existing thread id"), 4902 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids", 4903 "pids to filter (by the kernel)", trace__set_filter_pids_from_option), 4904 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide, 4905 "system-wide collection from all CPUs"), 4906 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu", 4907 "list of cpus to monitor"), 4908 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit, 4909 "child tasks do not inherit counters"), 4910 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages", 4911 "number of mmap data pages", evlist__parse_mmap_pages), 4912 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user", 4913 "user to profile"), 4914 OPT_CALLBACK(0, "duration", &trace, "float", 4915 "show only events with duration > N.M ms", 4916 trace__set_duration), 4917 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"), 4918 OPT_INCR('v', "verbose", &verbose, "be more verbose"), 4919 OPT_BOOLEAN('T', "time", &trace.full_time, 4920 "Show full timestamp, not time relative to first start"), 4921 OPT_BOOLEAN(0, "failure", &trace.failure_only, 4922 "Show only syscalls that failed"), 4923 OPT_BOOLEAN('s', "summary", &trace.summary_only, 4924 "Show only syscall summary with statistics"), 4925 OPT_BOOLEAN('S', "with-summary", &trace.summary, 4926 "Show all syscalls and summary with statistics"), 4927 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary, 4928 "Show errno stats per syscall, use with -s or -S"), 4929 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min", 4930 "Trace pagefaults", parse_pagefaults, "maj"), 4931 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"), 4932 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"), 4933 OPT_CALLBACK(0, "call-graph", &trace.opts, 4934 "record_mode[,record_size]", record_callchain_help, 4935 &record_parse_callchain_opt), 4936 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print, 4937 "Use libtraceevent to print the tracepoint arguments."), 4938 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains, 4939 "Show the kernel callchains on the syscall exit path"), 4940 OPT_ULONG(0, "max-events", &trace.max_events, 4941 "Set the maximum number of events to print, exit after that is reached. "), 4942 OPT_UINTEGER(0, "min-stack", &trace.min_stack, 4943 "Set the minimum stack depth when parsing the callchain, " 4944 "anything below the specified depth will be ignored."), 4945 OPT_UINTEGER(0, "max-stack", &trace.max_stack, 4946 "Set the maximum stack depth when parsing the callchain, " 4947 "anything beyond the specified depth will be ignored. 
" 4948 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)), 4949 OPT_BOOLEAN(0, "sort-events", &trace.sort_events, 4950 "Sort batch of events before processing, use if getting out of order events"), 4951 OPT_BOOLEAN(0, "print-sample", &trace.print_sample, 4952 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"), 4953 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout, 4954 "per thread proc mmap processing timeout in ms"), 4955 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only", 4956 trace__parse_cgroups), 4957 OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay, 4958 "ms to wait before starting measurement after program " 4959 "start"), 4960 OPTS_EVSWITCH(&trace.evswitch), 4961 OPT_END() 4962 }; 4963 bool __maybe_unused max_stack_user_set = true; 4964 bool mmap_pages_user_set = true; 4965 struct evsel *evsel; 4966 const char * const trace_subcommands[] = { "record", NULL }; 4967 int err = -1; 4968 char bf[BUFSIZ]; 4969 struct sigaction sigchld_act; 4970 4971 signal(SIGSEGV, sighandler_dump_stack); 4972 signal(SIGFPE, sighandler_dump_stack); 4973 signal(SIGINT, sighandler_interrupt); 4974 4975 memset(&sigchld_act, 0, sizeof(sigchld_act)); 4976 sigchld_act.sa_flags = SA_SIGINFO; 4977 sigchld_act.sa_sigaction = sighandler_chld; 4978 sigaction(SIGCHLD, &sigchld_act, NULL); 4979 4980 trace.evlist = evlist__new(); 4981 trace.sctbl = syscalltbl__new(); 4982 4983 if (trace.evlist == NULL || trace.sctbl == NULL) { 4984 pr_err("Not enough memory to run!\n"); 4985 err = -ENOMEM; 4986 goto out; 4987 } 4988 4989 /* 4990 * Parsing .perfconfig may entail creating a BPF event, that may need 4991 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting 4992 * is too small. This affects just this process, not touching the 4993 * global setting. If it fails we'll get something in 'perf trace -v' 4994 * to help diagnose the problem. 4995 */ 4996 rlimit__bump_memlock(); 4997 4998 err = perf_config(trace__config, &trace); 4999 if (err) 5000 goto out; 5001 5002 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands, 5003 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION); 5004 5005 /* 5006 * Here we already passed thru trace__parse_events_option() and it has 5007 * already figured out if -e syscall_name, if not but if --event 5008 * foo:bar was used, the user is interested _just_ in those, say, 5009 * tracepoint events, not in the strace-like syscall-name-based mode. 5010 * 5011 * This is important because we need to check if strace-like mode is 5012 * needed to decided if we should filter out the eBPF 5013 * __augmented_syscalls__ code, if it is in the mix, say, via 5014 * .perfconfig trace.add_events, and filter those out. 5015 */ 5016 if (!trace.trace_syscalls && !trace.trace_pgfaults && 5017 trace.evlist->core.nr_entries == 0 /* Was --events used? */) { 5018 trace.trace_syscalls = true; 5019 } 5020 /* 5021 * Now that we have --verbose figured out, lets see if we need to parse 5022 * events from .perfconfig, so that if those events fail parsing, say some 5023 * BPF program fails, then we'll be able to use --verbose to see what went 5024 * wrong in more detail. 
5025 */ 5026 if (trace.perfconfig_events != NULL) { 5027 struct parse_events_error parse_err; 5028 5029 parse_events_error__init(&parse_err); 5030 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err); 5031 if (err) 5032 parse_events_error__print(&parse_err, trace.perfconfig_events); 5033 parse_events_error__exit(&parse_err); 5034 if (err) 5035 goto out; 5036 } 5037
5038 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) { 5039 usage_with_options_msg(trace_usage, trace_options, 5040 "cgroup monitoring only available in system-wide mode"); 5041 } 5042
5043 #ifdef HAVE_BPF_SKEL 5044 if (!trace.trace_syscalls) 5045 goto skip_augmentation; 5046 5047 if ((argc >= 1) && (strcmp(argv[0], "record") == 0)) { 5048 pr_debug("Syscall augmentation fails with record, disabling augmentation\n"); 5049 goto skip_augmentation; 5050 } 5051 5052 trace.skel = augmented_raw_syscalls_bpf__open(); 5053 if (!trace.skel) { 5054 pr_debug("Failed to open augmented syscalls BPF skeleton\n"); 5055 } else { 5056 /* 5057 * Disable attaching the BPF programs except for sys_enter and 5058 * sys_exit; the others are tail-called from these as necessary. 5059 */ 5060 struct bpf_program *prog; 5061 5062 bpf_object__for_each_program(prog, trace.skel->obj) { 5063 if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit) 5064 bpf_program__set_autoattach(prog, /*autoattach=*/false); 5065 } 5066 5067 err = augmented_raw_syscalls_bpf__load(trace.skel); 5068 5069 if (err < 0) { 5070 libbpf_strerror(err, bf, sizeof(bf)); 5071 pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf); 5072 } else { 5073 augmented_raw_syscalls_bpf__attach(trace.skel); 5074 trace__add_syscall_newtp(&trace); 5075 } 5076 } 5077 5078 err = bpf__setup_bpf_output(trace.evlist); 5079 if (err) { 5080 libbpf_strerror(err, bf, sizeof(bf)); 5081 pr_err("ERROR: Setup BPF output event failed: %s\n", bf); 5082 goto out; 5083 } 5084 trace.syscalls.events.bpf_output = evlist__last(trace.evlist); 5085 assert(evsel__name_is(trace.syscalls.events.bpf_output, "__augmented_syscalls__")); 5086 skip_augmentation: 5087 #endif 5088 err = -1; 5089
5090 if (trace.trace_pgfaults) { 5091 trace.opts.sample_address = true; 5092 trace.opts.sample_time = true; 5093 } 5094 5095 if (trace.opts.mmap_pages == UINT_MAX) 5096 mmap_pages_user_set = false; 5097 5098 if (trace.max_stack == UINT_MAX) { 5099 trace.max_stack = input_name ?
PERF_MAX_STACK_DEPTH : sysctl__max_stack(); 5100 max_stack_user_set = false; 5101 } 5102 5103 #ifdef HAVE_DWARF_UNWIND_SUPPORT 5104 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) { 5105 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false); 5106 } 5107 #endif 5108 5109 if (callchain_param.enabled) { 5110 if (!mmap_pages_user_set && geteuid() == 0) 5111 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4; 5112 5113 symbol_conf.use_callchain = true; 5114 } 5115
5116 if (trace.evlist->core.nr_entries > 0) { 5117 bool use_btf = false; 5118 5119 evlist__set_default_evsel_handler(trace.evlist, trace__event_handler); 5120 if (evlist__set_syscall_tp_fields(trace.evlist, &use_btf)) { 5121 perror("failed to set syscalls:* tracepoint fields"); 5122 goto out; 5123 } 5124 5125 if (use_btf) 5126 trace__load_vmlinux_btf(&trace); 5127 } 5128 5129 if (trace.sort_events) { 5130 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace); 5131 ordered_events__set_copy_on_queue(&trace.oe.data, true); 5132 } 5133
5134 /* 5135 * If we are augmenting syscalls, then combine what we put in the 5136 * __augmented_syscalls__ BPF map with what is in the 5137 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF, 5138 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit. 5139 * 5140 * We'll switch to look at two BPF maps, one for sys_enter and the 5141 * other for sys_exit when we start augmenting the sys_exit paths with 5142 * buffers that are being copied from kernel to userspace, think 'read' 5143 * syscall. 5144 */ 5145 if (trace.syscalls.events.bpf_output) { 5146 evlist__for_each_entry(trace.evlist, evsel) { 5147 bool raw_syscalls_sys_exit = evsel__name_is(evsel, "raw_syscalls:sys_exit"); 5148 5149 if (raw_syscalls_sys_exit) { 5150 trace.raw_augmented_syscalls = true; 5151 goto init_augmented_syscall_tp; 5152 } 5153 5154 if (trace.syscalls.events.bpf_output->priv == NULL && 5155 strstr(evsel__name(evsel), "syscalls:sys_enter")) { 5156 struct evsel *augmented = trace.syscalls.events.bpf_output; 5157 if (evsel__init_augmented_syscall_tp(augmented, evsel) || 5158 evsel__init_augmented_syscall_tp_args(augmented)) 5159 goto out; 5160 /* 5161 * Augmented is the __augmented_syscalls__ BPF_OUTPUT event. 5162 * Above we made sure we can get from the payload the tp fields 5163 * that we get from the syscalls:sys_enter tracefs format file. 5164 */ 5165 augmented->handler = trace__sys_enter; 5166 /* 5167 * Now we do the same for the *syscalls:sys_enter event so that 5168 * if we handle it directly, i.e. if the BPF prog returns 0 so 5169 * as not to filter it, then we'll handle it just like we would 5170 * for the BPF_OUTPUT one: 5171 */ 5172 if (evsel__init_augmented_syscall_tp(evsel, evsel) || 5173 evsel__init_augmented_syscall_tp_args(evsel)) 5174 goto out; 5175 evsel->handler = trace__sys_enter; 5176 } 5177 5178 if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) { 5179 struct syscall_tp *sc; 5180 init_augmented_syscall_tp: 5181 if (evsel__init_augmented_syscall_tp(evsel, evsel)) 5182 goto out; 5183 sc = __evsel__syscall_tp(evsel); 5184 /* 5185 * For now with BPF raw_augmented we hook into 5186 * raw_syscalls:sys_enter and there we get all 5187 * 6 syscall args plus the tracepoint common 5188 * fields and the syscall_nr (another long). 5189 * So we check if that is the case and if so 5190 * don't look after the sc->args_size but 5191 * always after the full raw_syscalls:sys_enter 5192 * payload, which is fixed.
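 * I.e. sc->id.offset bytes of common fields plus the
 * syscall nr and its 6 args as longs:
 * sc->id.offset + (6 + 1) * sizeof(long), which is what
 * raw_augmented_syscalls_args_size gets set to below.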
5193 * 5194 * We'll revisit this later to pass 5195 * sc->args_size to the BPF augmenter (now 5196 * tools/perf/examples/bpf/augmented_raw_syscalls.c), 5197 * so that it copies only what we need for each 5198 * syscall, like what happens when we use 5199 * syscalls:sys_enter_NAME, so that we reduce 5200 * the kernel/userspace traffic to just what is 5201 * needed for each syscall. 5202 */ 5203 if (trace.raw_augmented_syscalls) 5204 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset; 5205 evsel__init_augmented_syscall_tp_ret(evsel); 5206 evsel->handler = trace__sys_exit; 5207 } 5208 } 5209 } 5210
5211 if ((argc >= 1) && (strcmp(argv[0], "record") == 0)) 5212 return trace__record(&trace, argc-1, &argv[1]); 5213 5214 /* Using just --errno-summary will trigger --summary */ 5215 if (trace.errno_summary && !trace.summary && !trace.summary_only) 5216 trace.summary_only = true; 5217 5218 /* summary_only implies the summary option, but don't overwrite summary if set */ 5219 if (trace.summary_only) 5220 trace.summary = trace.summary_only; 5221
5222 if (output_name != NULL) { 5223 err = trace__open_output(&trace, output_name); 5224 if (err < 0) { 5225 perror("failed to create output file"); 5226 goto out; 5227 } 5228 } 5229 5230 err = evswitch__init(&trace.evswitch, trace.evlist, stderr); 5231 if (err) 5232 goto out_close; 5233 5234 err = target__validate(&trace.opts.target); 5235 if (err) { 5236 target__strerror(&trace.opts.target, err, bf, sizeof(bf)); 5237 fprintf(trace.output, "%s", bf); 5238 goto out_close; 5239 } 5240 5241 err = target__parse_uid(&trace.opts.target); 5242 if (err) { 5243 target__strerror(&trace.opts.target, err, bf, sizeof(bf)); 5244 fprintf(trace.output, "%s", bf); 5245 goto out_close; 5246 } 5247 5248 if (!argc && target__none(&trace.opts.target)) 5249 trace.opts.target.system_wide = true; 5250 5251 if (input_name) 5252 err = trace__replay(&trace); 5253 else 5254 err = trace__run(&trace, argc, argv); 5255 5256 out_close: 5257 if (output_name != NULL) 5258 fclose(trace.output); 5259 out: 5260 trace__exit(&trace); 5261 #ifdef HAVE_BPF_SKEL 5262 augmented_raw_syscalls_bpf__destroy(trace.skel); 5263 #endif 5264 return err; 5265 } 5266
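/*
 * Illustrative usage, exercising only options defined above (a sketch,
 * not an exhaustive reference):
 *
 *	# strace-like tracing of a workload, globbing syscall names:
 *	perf trace -e open*,close ls
 *
 *	# system wide, all syscalls plus a summary at the end:
 *	perf trace -a -S sleep 1
 */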