/*
 * builtin-trace.c
 *
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system wide, etc. Default format is loosely strace like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 */

#include "util/record.h"
#include <api/fs/tracing_path.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#ifdef HAVE_BPF_SKEL
#include "bpf_skel/augmented_raw_syscalls.skel.h"
#endif
#endif
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/config.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/env.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/synthetic-events.h"
#include "util/evlist.h"
#include "util/evswitch.h"
#include "util/hashmap.h"
#include "util/mmap.h"
#include <subcmd/pager.h>
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/tool.h"
#include "util/util.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/tracepoint.h"
#include "callchain.h"
#include "print_binary.h"
#include "string2.h"
#include "syscalltbl.h"
#include "../perf.h"
#include "trace_augment.h"

#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/list_sort.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <fcntl.h>
#include <sys/sysmacros.h>

#include <linux/ctype.h>
#include <perf/mmap.h>

#ifdef HAVE_LIBTRACEEVENT
#include <event-parse.h>
#endif

#ifndef O_CLOEXEC
# define O_CLOEXEC		02000000
#endif

#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE	1024
#endif

#define RAW_SYSCALL_ARGS_NUM	6

/*
 * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
 *
 * We have to explicitly mark the direction of the flow of data, i.e. whether
 * it goes from the kernel to user space or the other way around, since the
 * BPF collector we have so far copies only from user to kernel space. Mark
 * the arguments that go in that direction, so that we don't end up collecting
 * the previous contents of syscall args that go from kernel to user space.
 */
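/*
 * For example, for write(fd, buf, count) the 'buf' contents flow from user
 * to kernel space, so its format in syscall_fmts[] below is marked
 * .from_user = true and the contents can be collected on syscall enter; for
 * read() the same buffer is only meaningful on syscall exit, so it is not
 * marked .from_user.
 */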
struct syscall_arg_fmt {
	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	bool	   (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
	void	   *parm;
	const char *name;
	u16	   nr_entries; // for arrays
	bool	   from_user;
	bool	   show_zero;
#ifdef HAVE_LIBBPF_SUPPORT
	const struct btf_type *type;
	int	   type_id; /* used in btf_dump */
#endif
};

struct syscall_fmt {
	const char *name;
	const char *alias;
	struct {
		const char *sys_enter,
			   *sys_exit;
	}	   bpf_prog_name;
	struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
	u8	   nr_args;
	bool	   errpid;
	bool	   timeout;
	bool	   hexret;
};

enum summary_mode {
	SUMMARY__NONE = 0,
	SUMMARY__BY_TOTAL,
	SUMMARY__BY_THREAD,
};

struct trace {
	struct perf_tool	tool;
	struct syscalltbl	*sctbl;
	struct {
		struct syscall	*table;
		struct {
			struct evsel *sys_enter,
				     *sys_exit,
				     *bpf_output;
		}		events;
	} syscalls;
#ifdef HAVE_BPF_SKEL
	struct augmented_raw_syscalls_bpf *skel;
#endif
#ifdef HAVE_LIBBPF_SUPPORT
	struct btf		*btf;
#endif
	struct record_opts	opts;
	struct evlist		*evlist;
	struct machine		*host;
	struct thread		*current;
	struct cgroup		*cgroup;
	u64			base_time;
	FILE			*output;
	unsigned long		nr_events;
	unsigned long		nr_events_printed;
	unsigned long		max_events;
	struct evswitch		evswitch;
	struct strlist		*ev_qualifier;
	struct {
		size_t		nr;
		int		*entries;
	}			ev_qualifier_ids;
	struct {
		size_t		nr;
		pid_t		*entries;
		struct bpf_map	*map;
	}			filter_pids;
	struct hashmap		*syscall_stats;
	double			duration_filter;
	double			runtime_ms;
	unsigned long		pfmaj, pfmin;
	struct {
		u64		vfs_getname,
				proc_getname;
	} stats;
	unsigned int		max_stack;
	unsigned int		min_stack;
	enum summary_mode	summary_mode;
	int			raw_augmented_syscalls_args_size;
	bool			raw_augmented_syscalls;
	bool			fd_path_disabled;
	bool			sort_events;
	bool			not_ev_qualifier;
	bool			live;
	bool			full_time;
	bool			sched;
	bool			multiple_threads;
	bool			summary;
	bool			summary_only;
	bool			errno_summary;
	bool			failure_only;
	bool			show_comm;
	bool			print_sample;
	bool			show_tool_stats;
	bool			trace_syscalls;
	bool			libtraceevent_print;
	bool			kernel_syscallchains;
	s16			args_alignment;
	bool			show_tstamp;
	bool			show_duration;
	bool			show_zeros;
	bool			show_arg_names;
	bool			show_string_prefix;
	bool			force;
	bool			vfs_getname;
	bool			force_btf;
	int			trace_pgfaults;
	char			*perfconfig_events;
	struct {
		struct ordered_events	data;
		u64			last;
	} oe;
};

static void trace__load_vmlinux_btf(struct trace *trace __maybe_unused)
{
#ifdef HAVE_LIBBPF_SUPPORT
	if (trace->btf != NULL)
		return;

	trace->btf = btf__load_vmlinux_btf();
	if (verbose > 0) {
		fprintf(trace->output, trace->btf ? "vmlinux BTF loaded\n" :
						    "Failed to load vmlinux BTF\n");
	}
#endif
}
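/*
 * A tp_field describes how to fetch one tracepoint field from the raw
 * sample payload: an offset plus an accessor that returns the field either
 * as an integer (byte swapped if the file and host disagree on endianness)
 * or as a pointer into sample->raw_data.
 */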
"vmlinux BTF loaded\n" : 242 "Failed to load vmlinux BTF\n"); 243 } 244 #endif 245 } 246 247 struct tp_field { 248 int offset; 249 union { 250 u64 (*integer)(struct tp_field *field, struct perf_sample *sample); 251 void *(*pointer)(struct tp_field *field, struct perf_sample *sample); 252 }; 253 }; 254 255 #define TP_UINT_FIELD(bits) \ 256 static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \ 257 { \ 258 u##bits value; \ 259 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \ 260 return value; \ 261 } 262 263 TP_UINT_FIELD(8); 264 TP_UINT_FIELD(16); 265 TP_UINT_FIELD(32); 266 TP_UINT_FIELD(64); 267 268 #define TP_UINT_FIELD__SWAPPED(bits) \ 269 static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \ 270 { \ 271 u##bits value; \ 272 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \ 273 return bswap_##bits(value);\ 274 } 275 276 TP_UINT_FIELD__SWAPPED(16); 277 TP_UINT_FIELD__SWAPPED(32); 278 TP_UINT_FIELD__SWAPPED(64); 279 280 static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap) 281 { 282 field->offset = offset; 283 284 switch (size) { 285 case 1: 286 field->integer = tp_field__u8; 287 break; 288 case 2: 289 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16; 290 break; 291 case 4: 292 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32; 293 break; 294 case 8: 295 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64; 296 break; 297 default: 298 return -1; 299 } 300 301 return 0; 302 } 303 304 static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap) 305 { 306 return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap); 307 } 308 309 static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample) 310 { 311 return sample->raw_data + field->offset; 312 } 313 314 static int __tp_field__init_ptr(struct tp_field *field, int offset) 315 { 316 field->offset = offset; 317 field->pointer = tp_field__ptr; 318 return 0; 319 } 320 321 static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field) 322 { 323 return __tp_field__init_ptr(field, format_field->offset); 324 } 325 326 struct syscall_tp { 327 struct tp_field id; 328 union { 329 struct tp_field args, ret; 330 }; 331 }; 332 333 /* 334 * The evsel->priv as used by 'perf trace' 335 * sc: for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME 336 * fmt: for all the other tracepoints 337 */ 338 struct evsel_trace { 339 struct syscall_tp sc; 340 struct syscall_arg_fmt *fmt; 341 }; 342 343 static struct evsel_trace *evsel_trace__new(void) 344 { 345 return zalloc(sizeof(struct evsel_trace)); 346 } 347 348 static void evsel_trace__delete(struct evsel_trace *et) 349 { 350 if (et == NULL) 351 return; 352 353 zfree(&et->fmt); 354 free(et); 355 } 356 357 /* 358 * Used with raw_syscalls:sys_{enter,exit} and with the 359 * syscalls:sys_{enter,exit}_SYSCALL tracepoints 360 */ 361 static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel) 362 { 363 struct evsel_trace *et = evsel->priv; 364 365 return &et->sc; 366 } 367 368 static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel) 369 { 370 if (evsel->priv == NULL) { 371 evsel->priv = evsel_trace__new(); 372 if (evsel->priv == NULL) 373 return NULL; 374 } 375 376 return __evsel__syscall_tp(evsel); 377 } 378 379 /* 380 * Used with all the other tracepoints. 
/*
 * Used with all the other tracepoints.
 */
static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	return et->fmt;
}

static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	if (evsel->priv == NULL) {
		et = evsel->priv = evsel_trace__new();

		if (et == NULL)
			return NULL;
	}

	if (et->fmt == NULL) {
		const struct tep_event *tp_format = evsel__tp_format(evsel);

		if (tp_format == NULL)
			goto out_delete;

		et->fmt = calloc(tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
		if (et->fmt == NULL)
			goto out_delete;
	}

	return __evsel__syscall_arg_fmt(evsel);

out_delete:
	evsel_trace__delete(evsel->priv);
	evsel->priv = NULL;
	return NULL;
}

static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
	struct tep_format_field *format_field = evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_uint(field, format_field, evsel->needs_swap);
}

#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
	   evsel__init_tp_uint_field(evsel, &sc->name, #name); })

static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
	struct tep_format_field *format_field = evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
	   evsel__init_tp_ptr_field(evsel, &sc->name, #name); })

static void evsel__delete_priv(struct evsel *evsel)
{
	zfree(&evsel->priv);
	evsel__delete(evsel);
}

static int evsel__init_syscall_tp(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel__syscall_tp(evsel);

	if (sc != NULL) {
		if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
		    evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
			return -ENOENT;

		return 0;
	}

	return -ENOMEM;
}

static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
	struct syscall_tp *sc = evsel__syscall_tp(evsel);

	if (sc != NULL) {
		struct tep_format_field *syscall_id = evsel__field(tp, "id");
		if (syscall_id == NULL)
			syscall_id = evsel__field(tp, "__syscall_nr");
		if (syscall_id == NULL ||
		    __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
			return -EINVAL;

		return 0;
	}

	return -ENOMEM;
}

static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
	struct syscall_tp *sc = __evsel__syscall_tp(evsel);

	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}

static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
	struct syscall_tp *sc = __evsel__syscall_tp(evsel);

	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}
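/*
 * The augmented events reuse the raw_syscalls payload layout: a u64 syscall
 * id right after the common fields, then the args (on sys_enter) or the u64
 * return value (on sys_exit), which is why the args/ret offsets above are
 * derived from sc->id.offset + sizeof(u64).
 */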
static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
	if (evsel__syscall_tp(evsel) != NULL) {
		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
			return -ENOENT;

		evsel->handler = handler;
		return 0;
	}

	return -ENOMEM;
}

static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
	struct evsel *evsel = evsel__newtp("raw_syscalls", direction);

	/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (evsel__init_raw_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	evsel__delete_priv(evsel);
	return NULL;
}

#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.pointer(&fields->name, sample); })

size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_suffix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
}

size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_prefix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}
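/*
 * For instance, with the 'whences' strarray defined further below, val=1 is
 * printed as "SEEK_CUR" (just "CUR" when string prefixes are suppressed),
 * while an out-of-range value falls back to the integer format followed by
 * a "SEEK_???" annotation.
 */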
static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
						const char *intfmt,
						struct syscall_arg *arg)
{
	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
}

static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray

bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarray__strtoul(arg->parm, bf, size, ret);
}

bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarray__strtoul_flags(arg->parm, bf, size, ret);
}

bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarrays__strtoul(arg->parm, bf, size, ret);
}

size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
{
	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
}

size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	size_t printed;
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];
		int idx = val - sa->offset;

		if (idx >= 0 && idx < sa->nr_entries) {
			if (sa->entries[idx] == NULL)
				break;
			return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
		}
	}

	printed = scnprintf(bf, size, intfmt, val);
	if (show_prefix)
		printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
	return printed;
}
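/*
 * The strtoul methods below go in the opposite direction, from a string
 * typed by the user, possibly '|'-separated for flags, back to a value, so
 * that beautified arg names can be used in filter expressions.
 */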
*/", sas->entries[0]->prefix); 624 return printed; 625 } 626 627 bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret) 628 { 629 int i; 630 631 for (i = 0; i < sa->nr_entries; ++i) { 632 if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') { 633 *ret = sa->offset + i; 634 return true; 635 } 636 } 637 638 return false; 639 } 640 641 bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret) 642 { 643 u64 val = 0; 644 char *tok = bf, *sep, *end; 645 646 *ret = 0; 647 648 while (size != 0) { 649 int toklen = size; 650 651 sep = memchr(tok, '|', size); 652 if (sep != NULL) { 653 size -= sep - tok + 1; 654 655 end = sep - 1; 656 while (end > tok && isspace(*end)) 657 --end; 658 659 toklen = end - tok + 1; 660 } 661 662 while (isspace(*tok)) 663 ++tok; 664 665 if (isalpha(*tok) || *tok == '_') { 666 if (!strarray__strtoul(sa, tok, toklen, &val)) 667 return false; 668 } else 669 val = strtoul(tok, NULL, 0); 670 671 *ret |= (1 << (val - 1)); 672 673 if (sep == NULL) 674 break; 675 tok = sep + 1; 676 } 677 678 return true; 679 } 680 681 bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret) 682 { 683 int i; 684 685 for (i = 0; i < sas->nr_entries; ++i) { 686 struct strarray *sa = sas->entries[i]; 687 688 if (strarray__strtoul(sa, bf, size, ret)) 689 return true; 690 } 691 692 return false; 693 } 694 695 size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size, 696 struct syscall_arg *arg) 697 { 698 return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val); 699 } 700 701 #ifndef AT_FDCWD 702 #define AT_FDCWD -100 703 #endif 704 705 static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size, 706 struct syscall_arg *arg) 707 { 708 int fd = arg->val; 709 const char *prefix = "AT_FD"; 710 711 if (fd == AT_FDCWD) 712 return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD"); 713 714 return syscall_arg__scnprintf_fd(bf, size, arg); 715 } 716 717 #define SCA_FDAT syscall_arg__scnprintf_fd_at 718 719 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size, 720 struct syscall_arg *arg); 721 722 #define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd 723 724 size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg) 725 { 726 return scnprintf(bf, size, "%#lx", arg->val); 727 } 728 729 size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg) 730 { 731 if (arg->val == 0) 732 return scnprintf(bf, size, "NULL"); 733 return syscall_arg__scnprintf_hex(bf, size, arg); 734 } 735 736 size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg) 737 { 738 return scnprintf(bf, size, "%d", arg->val); 739 } 740 741 size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg) 742 { 743 return scnprintf(bf, size, "%ld", arg->val); 744 } 745 746 static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg) 747 { 748 // XXX Hey, maybe for sched:sched_switch prev/next comm fields we can 749 // fill missing comms using thread__set_comm()... 750 // here or in a special syscall_arg__scnprintf_pid_sched_tp... 
static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD", "OBJ_PIN", "OBJ_GET", "PROG_ATTACH",
	"PROG_DETACH", "PROG_TEST_RUN", "PROG_GET_NEXT_ID", "MAP_GET_NEXT_ID",
	"PROG_GET_FD_BY_ID", "MAP_GET_FD_BY_ID", "OBJ_GET_INFO_BY_FD",
	"PROG_QUERY", "RAW_TRACEPOINT_OPEN", "BTF_LOAD", "BTF_GET_FD_BY_ID",
	"TASK_FD_QUERY", "MAP_LOOKUP_AND_DELETE_ELEM", "MAP_FREEZE",
	"BTF_GET_NEXT_ID", "MAP_LOOKUP_BATCH", "MAP_LOOKUP_AND_DELETE_BATCH",
	"MAP_UPDATE_BATCH", "MAP_DELETE_BATCH", "LINK_CREATE", "LINK_UPDATE",
	"LINK_GET_FD_BY_ID", "LINK_GET_NEXT_ID", "ENABLE_STATS", "ITER_CREATE",
	"LINK_DETACH", "PROG_BIND_MAP",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");

static const char *fsmount_flags[] = {
	[1] = "CLOEXEC",
};
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");

#include "trace/beauty/generated/fsconfig_arrays.c"

static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");

static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers, "ITIMER_");

static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");

static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences, "SEEK_");

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
	"GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds, "F_");

static const char *fcntl_linux_specific_cmds[] = {
	"SETLEASE", "GETLEASE", "NOTIFY", "DUPFD_QUERY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};

static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);

static struct strarray *fcntl_cmds_arrays[] = {
	&strarray__fcntl_cmds,
	&strarray__fcntl_linux_specific_cmds,
};

static DEFINE_STRARRAYS(fcntl_cmds_arrays);

static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");

static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow, "SIG_");

static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid, "CLOCK_");
"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI" 841 }; 842 static DEFINE_STRARRAY(clockid, "CLOCK_"); 843 844 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size, 845 struct syscall_arg *arg) 846 { 847 bool show_prefix = arg->show_string_prefix; 848 const char *suffix = "_OK"; 849 size_t printed = 0; 850 int mode = arg->val; 851 852 if (mode == F_OK) /* 0 */ 853 return scnprintf(bf, size, "F%s", show_prefix ? suffix : ""); 854 #define P_MODE(n) \ 855 if (mode & n##_OK) { \ 856 printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \ 857 mode &= ~n##_OK; \ 858 } 859 860 P_MODE(R); 861 P_MODE(W); 862 P_MODE(X); 863 #undef P_MODE 864 865 if (mode) 866 printed += scnprintf(bf + printed, size - printed, "|%#x", mode); 867 868 return printed; 869 } 870 871 #define SCA_ACCMODE syscall_arg__scnprintf_access_mode 872 873 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size, 874 struct syscall_arg *arg); 875 876 #define SCA_FILENAME syscall_arg__scnprintf_filename 877 878 // 'argname' is just documentational at this point, to remove the previous comment with that info 879 #define SCA_FILENAME_FROM_USER(argname) \ 880 { .scnprintf = SCA_FILENAME, \ 881 .from_user = true, } 882 883 static size_t syscall_arg__scnprintf_buf(char *bf, size_t size, struct syscall_arg *arg); 884 885 #define SCA_BUF syscall_arg__scnprintf_buf 886 887 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size, 888 struct syscall_arg *arg) 889 { 890 bool show_prefix = arg->show_string_prefix; 891 const char *prefix = "O_"; 892 int printed = 0, flags = arg->val; 893 894 #define P_FLAG(n) \ 895 if (flags & O_##n) { \ 896 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ 897 flags &= ~O_##n; \ 898 } 899 900 P_FLAG(CLOEXEC); 901 P_FLAG(NONBLOCK); 902 #undef P_FLAG 903 904 if (flags) 905 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); 906 907 return printed; 908 } 909 910 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags 911 912 #ifndef GRND_NONBLOCK 913 #define GRND_NONBLOCK 0x0001 914 #endif 915 #ifndef GRND_RANDOM 916 #define GRND_RANDOM 0x0002 917 #endif 918 919 static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size, 920 struct syscall_arg *arg) 921 { 922 bool show_prefix = arg->show_string_prefix; 923 const char *prefix = "GRND_"; 924 int printed = 0, flags = arg->val; 925 926 #define P_FLAG(n) \ 927 if (flags & GRND_##n) { \ 928 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ 929 flags &= ~GRND_##n; \ 930 } 931 932 P_FLAG(RANDOM); 933 P_FLAG(NONBLOCK); 934 #undef P_FLAG 935 936 if (flags) 937 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? 
"|" : "", flags); 938 939 return printed; 940 } 941 942 #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags 943 944 #ifdef HAVE_LIBBPF_SUPPORT 945 static void syscall_arg_fmt__cache_btf_enum(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type) 946 { 947 int id; 948 949 type = strstr(type, "enum "); 950 if (type == NULL) 951 return; 952 953 type += 5; // skip "enum " to get the enumeration name 954 955 id = btf__find_by_name(btf, type); 956 if (id < 0) 957 return; 958 959 arg_fmt->type = btf__type_by_id(btf, id); 960 } 961 962 static bool syscall_arg__strtoul_btf_enum(char *bf, size_t size, struct syscall_arg *arg, u64 *val) 963 { 964 const struct btf_type *bt = arg->fmt->type; 965 struct btf *btf = arg->trace->btf; 966 struct btf_enum *be = btf_enum(bt); 967 968 for (int i = 0; i < btf_vlen(bt); ++i, ++be) { 969 const char *name = btf__name_by_offset(btf, be->name_off); 970 int max_len = max(size, strlen(name)); 971 972 if (strncmp(name, bf, max_len) == 0) { 973 *val = be->val; 974 return true; 975 } 976 } 977 978 return false; 979 } 980 981 static bool syscall_arg__strtoul_btf_type(char *bf, size_t size, struct syscall_arg *arg, u64 *val) 982 { 983 const struct btf_type *bt; 984 char *type = arg->type_name; 985 struct btf *btf; 986 987 trace__load_vmlinux_btf(arg->trace); 988 989 btf = arg->trace->btf; 990 if (btf == NULL) 991 return false; 992 993 if (arg->fmt->type == NULL) { 994 // See if this is an enum 995 syscall_arg_fmt__cache_btf_enum(arg->fmt, btf, type); 996 } 997 998 // Now let's see if we have a BTF type resolved 999 bt = arg->fmt->type; 1000 if (bt == NULL) 1001 return false; 1002 1003 // If it is an enum: 1004 if (btf_is_enum(arg->fmt->type)) 1005 return syscall_arg__strtoul_btf_enum(bf, size, arg, val); 1006 1007 return false; 1008 } 1009 1010 static size_t btf_enum_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, int val) 1011 { 1012 struct btf_enum *be = btf_enum(type); 1013 const int nr_entries = btf_vlen(type); 1014 1015 for (int i = 0; i < nr_entries; ++i, ++be) { 1016 if (be->val == val) { 1017 return scnprintf(bf, size, "%s", 1018 btf__name_by_offset(btf, be->name_off)); 1019 } 1020 } 1021 1022 return 0; 1023 } 1024 1025 struct trace_btf_dump_snprintf_ctx { 1026 char *bf; 1027 size_t printed, size; 1028 }; 1029 1030 static void trace__btf_dump_snprintf(void *vctx, const char *fmt, va_list args) 1031 { 1032 struct trace_btf_dump_snprintf_ctx *ctx = vctx; 1033 1034 ctx->printed += vscnprintf(ctx->bf + ctx->printed, ctx->size - ctx->printed, fmt, args); 1035 } 1036 1037 static size_t btf_struct_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, struct syscall_arg *arg) 1038 { 1039 struct trace_btf_dump_snprintf_ctx ctx = { 1040 .bf = bf, 1041 .size = size, 1042 }; 1043 struct augmented_arg *augmented_arg = arg->augmented.args; 1044 int type_id = arg->fmt->type_id, consumed; 1045 struct btf_dump *btf_dump; 1046 1047 LIBBPF_OPTS(btf_dump_opts, dump_opts); 1048 LIBBPF_OPTS(btf_dump_type_data_opts, dump_data_opts); 1049 1050 if (arg == NULL || arg->augmented.args == NULL) 1051 return 0; 1052 1053 dump_data_opts.compact = true; 1054 dump_data_opts.skip_names = !arg->trace->show_arg_names; 1055 1056 btf_dump = btf_dump__new(btf, trace__btf_dump_snprintf, &ctx, &dump_opts); 1057 if (btf_dump == NULL) 1058 return 0; 1059 1060 /* pretty print the struct data here */ 1061 if (btf_dump__dump_type_data(btf_dump, type_id, arg->augmented.args->value, type->size, &dump_data_opts) == 0) 1062 return 0; 
static size_t trace__btf_scnprintf(struct trace *trace, struct syscall_arg *arg, char *bf,
				   size_t size, int val, char *type)
{
	struct syscall_arg_fmt *arg_fmt = arg->fmt;

	if (trace->btf == NULL)
		return 0;

	if (arg_fmt->type == NULL) {
		// Check if this is an enum and if we have the BTF type for it.
		syscall_arg_fmt__cache_btf_enum(arg_fmt, trace->btf, type);
	}

	// Did we manage to find a BTF type for the syscall/tracepoint argument?
	if (arg_fmt->type == NULL)
		return 0;

	if (btf_is_enum(arg_fmt->type))
		return btf_enum_scnprintf(arg_fmt->type, trace->btf, bf, size, val);
	else if (btf_is_struct(arg_fmt->type) || btf_is_union(arg_fmt->type))
		return btf_struct_scnprintf(arg_fmt->type, trace->btf, bf, size, arg);

	return 0;
}

#else // HAVE_LIBBPF_SUPPORT
static size_t trace__btf_scnprintf(struct trace *trace __maybe_unused, struct syscall_arg *arg __maybe_unused,
				   char *bf __maybe_unused, size_t size __maybe_unused, int val __maybe_unused,
				   char *type __maybe_unused)
{
	return 0;
}

static bool syscall_arg__strtoul_btf_type(char *bf __maybe_unused, size_t size __maybe_unused,
					  struct syscall_arg *arg __maybe_unused, u64 *val __maybe_unused)
{
	return false;
}
#endif // HAVE_LIBBPF_SUPPORT

#define STUL_BTF_TYPE syscall_arg__strtoul_btf_type

#define STRARRAY(name, array) \
	  { .scnprintf	= SCA_STRARRAY, \
	    .strtoul	= STUL_STRARRAY, \
	    .parm	= &strarray__##array, }

#define STRARRAY_FLAGS(name, array) \
	  { .scnprintf	= SCA_STRARRAY_FLAGS, \
	    .strtoul	= STUL_STRARRAY_FLAGS, \
	    .parm	= &strarray__##array, }

#include "trace/beauty/eventfd.c"
#include "trace/beauty/futex_op.c"
#include "trace/beauty/futex_val3.c"
#include "trace/beauty/mmap.c"
#include "trace/beauty/mode_t.c"
#include "trace/beauty/msg_flags.c"
#include "trace/beauty/open_flags.c"
#include "trace/beauty/perf_event_open.c"
#include "trace/beauty/pid.c"
#include "trace/beauty/sched_policy.c"
#include "trace/beauty/seccomp.c"
#include "trace/beauty/signum.c"
#include "trace/beauty/socket_type.c"
#include "trace/beauty/waitid_options.c"

static const struct syscall_fmt syscall_fmts[] = {
	{ .name	    = "access",
	  .arg = { [1] = { .scnprintf = SCA_ACCMODE,  /* mode */ }, }, },
	{ .name	    = "arch_prctl",
	  .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
		   [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
	{ .name	    = "bind",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = SCA_SOCKADDR_FROM_USER(umyaddr),
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name	    = "bpf",
	  .arg = { [0] = STRARRAY(cmd, bpf_cmd),
		   [1] = { .from_user = true /* attr */, }, } },
	{ .name	    = "brk",	    .hexret = true,
	  .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
	{ .name	    = "clock_gettime",
	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
	{ .name	    = "clock_nanosleep",
	  .arg = { [2] = SCA_TIMESPEC_FROM_USER(req), }, },
	{ .name	    = "clone",	    .errpid = true, .nr_args = 5,
"flags", .scnprintf = SCA_CLONE_FLAGS, }, 1161 [1] = { .name = "child_stack", .scnprintf = SCA_HEX, }, 1162 [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, }, 1163 [3] = { .name = "child_tidptr", .scnprintf = SCA_HEX, }, 1164 [4] = { .name = "tls", .scnprintf = SCA_HEX, }, }, }, 1165 { .name = "close", 1166 .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, }, 1167 { .name = "connect", 1168 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ }, 1169 [1] = SCA_SOCKADDR_FROM_USER(servaddr), 1170 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, }, 1171 { .name = "epoll_ctl", 1172 .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, }, 1173 { .name = "eventfd2", 1174 .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, }, 1175 { .name = "faccessat", 1176 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, 1177 [1] = SCA_FILENAME_FROM_USER(pathname), 1178 [2] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, }, 1179 { .name = "faccessat2", 1180 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, 1181 [1] = SCA_FILENAME_FROM_USER(pathname), 1182 [2] = { .scnprintf = SCA_ACCMODE, /* mode */ }, 1183 [3] = { .scnprintf = SCA_FACCESSAT2_FLAGS, /* flags */ }, }, }, 1184 { .name = "fchmodat", 1185 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 1186 { .name = "fchownat", 1187 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 1188 { .name = "fcntl", 1189 .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */ 1190 .strtoul = STUL_STRARRAYS, 1191 .parm = &strarrays__fcntl_cmds_arrays, 1192 .show_zero = true, }, 1193 [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, }, 1194 { .name = "flock", 1195 .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, }, 1196 { .name = "fsconfig", 1197 .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, }, 1198 { .name = "fsmount", 1199 .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags), 1200 [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, }, 1201 { .name = "fspick", 1202 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, 1203 [1] = SCA_FILENAME_FROM_USER(path), 1204 [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, }, 1205 { .name = "fstat", .alias = "newfstat", }, 1206 { .name = "futex", 1207 .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ }, 1208 [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, }, 1209 { .name = "futimesat", 1210 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 1211 { .name = "getitimer", 1212 .arg = { [0] = STRARRAY(which, itimers), }, }, 1213 { .name = "getpid", .errpid = true, }, 1214 { .name = "getpgid", .errpid = true, }, 1215 { .name = "getppid", .errpid = true, }, 1216 { .name = "getrandom", 1217 .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, }, 1218 { .name = "getrlimit", 1219 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, }, 1220 { .name = "getsockopt", 1221 .arg = { [1] = STRARRAY(level, socket_level), }, }, 1222 { .name = "gettid", .errpid = true, }, 1223 { .name = "ioctl", 1224 .arg = { 1225 #if defined(__i386__) || defined(__x86_64__) 1226 /* 1227 * FIXME: Make this available to all arches. 
		   [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#else
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#endif
	{ .name	    = "kcmp",	    .nr_args = 5,
	  .arg = { [0] = { .name = "pid1",	.scnprintf = SCA_PID, },
		   [1] = { .name = "pid2",	.scnprintf = SCA_PID, },
		   [2] = { .name = "type",	.scnprintf = SCA_KCMP_TYPE, },
		   [3] = { .name = "idx1",	.scnprintf = SCA_KCMP_IDX, },
		   [4] = { .name = "idx2",	.scnprintf = SCA_KCMP_IDX, }, }, },
	{ .name	    = "keyctl",
	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
	{ .name	    = "kill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "linkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "lseek",
	  .arg = { [2] = STRARRAY(whence, whences), }, },
	{ .name	    = "lstat", .alias = "newlstat", },
	{ .name	    = "madvise",
	  .arg = { [0] = { .scnprintf = SCA_HEX,      /* start */ },
		   [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
	{ .name	    = "mkdirat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "mknodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "mmap",	    .hexret = true,
/* The standard mmap maps to old_mmap on s390x */
#if defined(__s390x__)
	.alias = "old_mmap",
#endif
	  .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ },
		   [3] = { .scnprintf = SCA_MMAP_FLAGS,	/* flags */
			   .strtoul   = STUL_STRARRAY_FLAGS,
			   .parm      = &strarray__mmap_flags, },
		   [5] = { .scnprintf = SCA_HEX,	/* offset */ }, }, },
	{ .name	    = "mount",
	  .arg = { [0] = SCA_FILENAME_FROM_USER(devname),
		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
			   .mask_val  = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "move_mount",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* from_dfd */ },
		   [1] = SCA_FILENAME_FROM_USER(pathname),
		   [2] = { .scnprintf = SCA_FDAT,	/* to_dfd */ },
		   [3] = SCA_FILENAME_FROM_USER(pathname),
		   [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ }, }, },
	{ .name	    = "mq_unlink",
	  .arg = { [0] = SCA_FILENAME_FROM_USER(u_name), }, },
	{ .name	    = "mremap",	    .hexret = true,
	  .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
	{ .name	    = "name_to_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "nanosleep",
	  .arg = { [0] = SCA_TIMESPEC_FROM_USER(req), }, },
	{ .name	    = "newfstatat", .alias = "fstatat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dirfd */ },
		   [1] = SCA_FILENAME_FROM_USER(pathname),
		   [3] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, },
	{ .name	    = "open",
	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "open_by_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "openat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "perf_event_open",
	  .arg = { [0] = SCA_PERF_ATTR_FROM_USER(attr),
		   [2] = { .scnprintf = SCA_INT,	/* cpu */ },
		   [3] = { .scnprintf = SCA_FD,		/* group_fd */ },
		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
	{ .name	    = "pipe2",
	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
	{ .name	    = "pkey_alloc",
	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS,	/* access_rights */ }, }, },
	{ .name	    = "pkey_free",
	  .arg = { [0] = { .scnprintf = SCA_INT,	/* key */ }, }, },
	{ .name	    = "pkey_mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ },
		   [3] = { .scnprintf = SCA_INT,	/* pkey */ }, }, },
	{ .name	    = "poll", .timeout = true, },
	{ .name	    = "ppoll", .timeout = true, },
	{ .name	    = "prctl",
	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */
			   .strtoul   = STUL_STRARRAY,
			   .parm      = &strarray__prctl_options, },
		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
	{ .name	    = "pread", .alias = "pread64", },
	{ .name	    = "preadv", .alias = "pread", },
	{ .name	    = "prlimit64",
	  .arg = { [1] = STRARRAY(resource, rlimit_resources),
		   [2] = { .from_user = true /* new_rlim */, }, }, },
	{ .name	    = "pwrite", .alias = "pwrite64", },
	{ .name	    = "readlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "recvfrom",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "recvmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "recvmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "renameat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
	{ .name	    = "renameat2",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
	{ .name	    = "rseq",	    .errpid = true,
	  .arg = { [0] = { .from_user = true /* rseq */, }, }, },
	{ .name	    = "rt_sigaction",
	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "rt_sigprocmask",
	  .arg = { [0] = STRARRAY(how, sighow), }, },
	{ .name	    = "rt_sigqueueinfo",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "rt_tgsigqueueinfo",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "sched_setscheduler",
	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
	{ .name	    = "seccomp",
	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP,	   /* op */ },
		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
	{ .name	    = "select", .timeout = true, },
	{ .name	    = "sendfile", .alias = "sendfile64", },
	{ .name	    = "sendmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "sendmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "sendto",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
		   [4] = SCA_SOCKADDR_FROM_USER(addr), }, },
	{ .name	    = "set_robust_list",	.errpid = true,
	  .arg = { [0] = { .from_user = true /* head */, }, }, },
	{ .name	    = "set_tid_address", .errpid = true, },
	{ .name	    = "setitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "setrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources),
		   [1] = { .from_user = true /* rlim */, }, }, },
"setsockopt", 1376 .arg = { [1] = STRARRAY(level, socket_level), }, }, 1377 { .name = "socket", 1378 .arg = { [0] = STRARRAY(family, socket_families), 1379 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, 1380 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, }, 1381 { .name = "socketpair", 1382 .arg = { [0] = STRARRAY(family, socket_families), 1383 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, 1384 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, }, 1385 { .name = "stat", .alias = "newstat", }, 1386 { .name = "statx", 1387 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ }, 1388 [2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ } , 1389 [3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, }, 1390 { .name = "swapoff", 1391 .arg = { [0] = SCA_FILENAME_FROM_USER(specialfile), }, }, 1392 { .name = "swapon", 1393 .arg = { [0] = SCA_FILENAME_FROM_USER(specialfile), }, }, 1394 { .name = "symlinkat", 1395 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, 1396 { .name = "sync_file_range", 1397 .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, }, 1398 { .name = "tgkill", 1399 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1400 { .name = "tkill", 1401 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1402 { .name = "umount2", .alias = "umount", 1403 .arg = { [0] = SCA_FILENAME_FROM_USER(name), }, }, 1404 { .name = "uname", .alias = "newuname", }, 1405 { .name = "unlinkat", 1406 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, 1407 [1] = SCA_FILENAME_FROM_USER(pathname), 1408 [2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, }, 1409 { .name = "utimensat", 1410 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, }, 1411 { .name = "wait4", .errpid = true, 1412 .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, }, 1413 { .name = "waitid", .errpid = true, 1414 .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, }, 1415 { .name = "write", 1416 .arg = { [1] = { .scnprintf = SCA_BUF /* buf */, .from_user = true, }, }, }, 1417 }; 1418 1419 static int syscall_fmt__cmp(const void *name, const void *fmtp) 1420 { 1421 const struct syscall_fmt *fmt = fmtp; 1422 return strcmp(name, fmt->name); 1423 } 1424 1425 static const struct syscall_fmt *__syscall_fmt__find(const struct syscall_fmt *fmts, 1426 const int nmemb, 1427 const char *name) 1428 { 1429 return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp); 1430 } 1431 1432 static const struct syscall_fmt *syscall_fmt__find(const char *name) 1433 { 1434 const int nmemb = ARRAY_SIZE(syscall_fmts); 1435 return __syscall_fmt__find(syscall_fmts, nmemb, name); 1436 } 1437 1438 static const struct syscall_fmt *__syscall_fmt__find_by_alias(const struct syscall_fmt *fmts, 1439 const int nmemb, const char *alias) 1440 { 1441 int i; 1442 1443 for (i = 0; i < nmemb; ++i) { 1444 if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0) 1445 return &fmts[i]; 1446 } 1447 1448 return NULL; 1449 } 1450 1451 static const struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias) 1452 { 1453 const int nmemb = ARRAY_SIZE(syscall_fmts); 1454 return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias); 1455 } 1456 1457 /* 1458 * is_exit: is this "exit" or "exit_group"? 1459 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter. 1460 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc. 
 * nonexistent: Just a hole in the syscall table, syscall id not allocated
 */
struct syscall {
	struct tep_event    *tp_format;
	int		    nr_args;
	int		    args_size;
	struct {
		struct bpf_program *sys_enter,
				   *sys_exit;
	}		    bpf_prog;
	bool		    is_exit;
	bool		    is_open;
	bool		    nonexistent;
	bool		    use_btf;
	struct tep_format_field *args;
	const char	    *name;
	const struct syscall_fmt  *fmt;
	struct syscall_arg_fmt *arg_fmt;
};

/*
 * We need to have this 'calculated' boolean because in some cases we really
 * don't know what is the duration of a syscall, for instance, when we start
 * a session and some threads are waiting for a syscall to finish, say 'poll',
 * in which case all we can do is to print "( ? )" for the duration and for
 * the start timestamp.
 */
static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (!calculated)
		printed += fprintf(fp, "         ");
	else if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}
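/*
 * Most of the state 'perf trace' keeps per thread (syscall stats, cached
 * fd -> pathname mappings, the partially assembled enter string) lives in
 * the thread_trace below, hanging off thread->priv.
 */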
/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
 * ret_scnprintf: syscall args may set this to a different syscall return
 *                formatter, for instance, fcntl may return fds, file flags, etc.
 */
struct thread_trace {
	u64		  entry_time;
	bool		  entry_pending;
	unsigned long	  nr_events;
	unsigned long	  pfmaj, pfmin;
	char		  *entry_str;
	double		  runtime_ms;
	size_t		  (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	struct {
		unsigned long ptr;
		short int     entry_str_pos;
		bool	      pending_open;
		unsigned int  namelen;
		char	      *name;
	} filename;
	struct {
		int	      max;
		struct file   *table;
	} files;

	struct hashmap *syscall_stats;
};

static size_t syscall_id_hash(long key, void *ctx __maybe_unused)
{
	return key;
}

static bool syscall_id_equal(long key1, long key2, void *ctx __maybe_unused)
{
	return key1 == key2;
}

static struct hashmap *alloc_syscall_stats(void)
{
	return hashmap__new(syscall_id_hash, syscall_id_equal, NULL);
}

static void delete_syscall_stats(struct hashmap *syscall_stats)
{
	struct hashmap_entry *pos;
	size_t bkt;

	if (syscall_stats == NULL)
		return;

	hashmap__for_each_entry(syscall_stats, pos, bkt)
		zfree(&pos->pvalue);
	hashmap__free(syscall_stats);
}

static struct thread_trace *thread_trace__new(struct trace *trace)
{
	struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));

	if (ttrace) {
		ttrace->files.max = -1;
		if (trace->summary) {
			ttrace->syscall_stats = alloc_syscall_stats();
			if (IS_ERR(ttrace->syscall_stats))
				zfree(&ttrace);
		}
	}

	return ttrace;
}

static void thread_trace__free_files(struct thread_trace *ttrace);

static void thread_trace__delete(void *pttrace)
{
	struct thread_trace *ttrace = pttrace;

	if (!ttrace)
		return;

	delete_syscall_stats(ttrace->syscall_stats);
	ttrace->syscall_stats = NULL;
	thread_trace__free_files(ttrace);
	zfree(&ttrace->entry_str);
	free(ttrace);
}

static struct thread_trace *thread__trace(struct thread *thread, struct trace *trace)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	if (thread__priv(thread) == NULL)
		thread__set_priv(thread, thread_trace__new(trace));

	if (thread__priv(thread) == NULL)
		goto fail;

	ttrace = thread__priv(thread);
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(trace->output, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}

void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
{
	struct thread_trace *ttrace = thread__priv(arg->thread);

	ttrace->ret_scnprintf = ret_scnprintf;
}

#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

static const size_t trace__entry_str_size = 2048;

static void thread_trace__free_files(struct thread_trace *ttrace)
{
	for (int i = 0; i < ttrace->files.max; ++i) {
		struct file *file = ttrace->files.table + i;
		zfree(&file->pathname);
	}

	zfree(&ttrace->files.table);
	ttrace->files.max = -1;
}
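/*
 * The fd -> struct file table is a plain array indexed by fd, grown on
 * demand to fit the largest fd seen so far, with any newly added slots
 * zeroed.
 */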
static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
	if (fd < 0)
		return NULL;

	if (fd > ttrace->files.max) {
		struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));

		if (nfiles == NULL)
			return NULL;

		if (ttrace->files.max != -1) {
			memset(nfiles + ttrace->files.max + 1, 0,
			       (fd - ttrace->files.max) * sizeof(struct file));
		} else {
			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
		}

		ttrace->files.table = nfiles;
		ttrace->files.max   = fd;
	}

	return ttrace->files.table + fd;
}

struct file *thread__files_entry(struct thread *thread, int fd)
{
	return thread_trace__files_entry(thread__priv(thread), fd);
}

static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
{
	struct thread_trace *ttrace = thread__priv(thread);
	struct file *file = thread_trace__files_entry(ttrace, fd);

	if (file != NULL) {
		struct stat st;
		if (stat(pathname, &st) == 0)
			file->dev_maj = major(st.st_rdev);
		file->pathname = strdup(pathname);
		if (file->pathname)
			return 0;
	}

	return -1;
}

static int thread__read_fd_path(struct thread *thread, int fd)
{
	char linkname[PATH_MAX], pathname[PATH_MAX];
	struct stat st;
	int ret;

	if (thread__pid(thread) == thread__tid(thread)) {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/fd/%d", thread__pid(thread), fd);
	} else {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/task/%d/fd/%d",
			  thread__pid(thread), thread__tid(thread), fd);
	}

	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
		return -1;

	ret = readlink(linkname, pathname, sizeof(pathname));

	if (ret < 0 || ret > st.st_size)
		return -1;

	pathname[ret] = '\0';
	return trace__set_fd_pathname(thread, fd, pathname);
}

static const char *thread__fd_path(struct thread *thread, int fd,
				   struct trace *trace)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (ttrace == NULL || trace->fd_path_disabled)
		return NULL;

	if (fd < 0)
		return NULL;

	if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
		if (!trace->live)
			return NULL;
		++trace->stats.proc_getname;
		if (thread__read_fd_path(thread, fd))
			return NULL;
	}

	return ttrace->files.table[fd].pathname;
}

size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = scnprintf(bf, size, "%d", fd);
	const char *path = thread__fd_path(arg->thread, fd, arg->trace);

	if (path)
		printed += scnprintf(bf + printed, size - printed, "<%s>", path);

	return printed;
}

size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
{
	size_t printed = scnprintf(bf, size, "%d", fd);
	struct thread *thread = machine__find_thread(trace->host, pid, pid);

	if (thread) {
		const char *path = thread__fd_path(thread, fd, trace);

		if (path)
			printed += scnprintf(bf + printed, size - printed, "<%s>", path);

		thread__put(thread);
	}

	return printed;
}
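/*
 * When a fd is closed its cached pathname is dropped, so that if the number
 * is later recycled for another file we don't show a stale <path>.
 */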
static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
	struct thread_trace *ttrace = thread__priv(arg->thread);

	if (ttrace && fd >= 0 && fd <= ttrace->files.max)
		zfree(&ttrace->files.table[fd].pathname);

	return printed;
}

static void thread__set_filename_pos(struct thread *thread, const char *bf,
				     unsigned long ptr)
{
	struct thread_trace *ttrace = thread__priv(thread);

	ttrace->filename.ptr = ptr;
	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
}

static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
	struct augmented_arg *augmented_arg = arg->augmented.args;
	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
	/*
	 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
	 * we would have two strings, each prefixed by its size.
	 */
	int consumed = sizeof(*augmented_arg) + augmented_arg->size;

	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	return printed;
}

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	unsigned long ptr = arg->val;

	if (arg->augmented.args)
		return syscall_arg__scnprintf_augmented_string(arg, bf, size);

	if (!arg->trace->vfs_getname)
		return scnprintf(bf, size, "%#lx", ptr);

	thread__set_filename_pos(arg->thread, bf, ptr);
	return 0;
}

#define MAX_CONTROL_CHAR 31
#define MAX_ASCII 127

static size_t syscall_arg__scnprintf_buf(char *bf, size_t size, struct syscall_arg *arg)
{
	struct augmented_arg *augmented_arg = arg->augmented.args;
	unsigned char *orig;
	size_t printed = 0;
	int consumed;

	if (augmented_arg == NULL)
		return 0;

	orig = (unsigned char *)augmented_arg->value;

	for (int j = 0; j < augmented_arg->size; ++j) {
		bool control_char = orig[j] <= MAX_CONTROL_CHAR || orig[j] >= MAX_ASCII;
		/* print control characters (0~31 and 127), and non-ascii characters in \(digits) */
		printed += scnprintf(bf + printed, size - printed, control_char ? "\\%d" : "%c", (int)orig[j]);
	}

	consumed = sizeof(*augmented_arg) + augmented_arg->size;
	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	return printed;
}

static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;

	return fprintf(fp, "%10.3f ", ts);
}

/*
 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
 * using ttrace->entry_time for a thread that receives a sys_exit without
 * first having received a sys_enter ("poll" issued before the tracing
 * session starts, or a sys_enter lost due to ring buffer overflow).
 */
static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	if (tstamp > 0)
		return __trace__fprintf_tstamp(trace, tstamp, fp);

	return fprintf(fp, "         ? ");
}
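/*
 * SIGINT et al just flip the 'done'/'interrupted' flags polled by the main
 * loop, while SIGCHLD terminates the session only when it comes from the
 * traced workload, not from some other child process.
 */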
"); 1870 } 1871 1872 static pid_t workload_pid = -1; 1873 static volatile sig_atomic_t done = false; 1874 static volatile sig_atomic_t interrupted = false; 1875 1876 static void sighandler_interrupt(int sig __maybe_unused) 1877 { 1878 done = interrupted = true; 1879 } 1880 1881 static void sighandler_chld(int sig __maybe_unused, siginfo_t *info, 1882 void *context __maybe_unused) 1883 { 1884 if (info->si_pid == workload_pid) 1885 done = true; 1886 } 1887 1888 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp) 1889 { 1890 size_t printed = 0; 1891 1892 if (trace->multiple_threads) { 1893 if (trace->show_comm) 1894 printed += fprintf(fp, "%.14s/", thread__comm_str(thread)); 1895 printed += fprintf(fp, "%d ", thread__tid(thread)); 1896 } 1897 1898 return printed; 1899 } 1900 1901 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread, 1902 u64 duration, bool duration_calculated, u64 tstamp, FILE *fp) 1903 { 1904 size_t printed = 0; 1905 1906 if (trace->show_tstamp) 1907 printed = trace__fprintf_tstamp(trace, tstamp, fp); 1908 if (trace->show_duration) 1909 printed += fprintf_duration(duration, duration_calculated, fp); 1910 return printed + trace__fprintf_comm_tid(trace, thread, fp); 1911 } 1912 1913 static int trace__process_event(struct trace *trace, struct machine *machine, 1914 union perf_event *event, struct perf_sample *sample) 1915 { 1916 int ret = 0; 1917 1918 switch (event->header.type) { 1919 case PERF_RECORD_LOST: 1920 color_fprintf(trace->output, PERF_COLOR_RED, 1921 "LOST %" PRIu64 " events!\n", (u64)event->lost.lost); 1922 ret = machine__process_lost_event(machine, event, sample); 1923 break; 1924 default: 1925 ret = machine__process_event(machine, event, sample); 1926 break; 1927 } 1928 1929 return ret; 1930 } 1931 1932 static int trace__tool_process(const struct perf_tool *tool, 1933 union perf_event *event, 1934 struct perf_sample *sample, 1935 struct machine *machine) 1936 { 1937 struct trace *trace = container_of(tool, struct trace, tool); 1938 return trace__process_event(trace, machine, event, sample); 1939 } 1940 1941 static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp) 1942 { 1943 struct machine *machine = vmachine; 1944 1945 if (machine->kptr_restrict_warned) 1946 return NULL; 1947 1948 if (symbol_conf.kptr_restrict) { 1949 pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n" 1950 "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n" 1951 "Kernel samples will not be resolved.\n"); 1952 machine->kptr_restrict_warned = true; 1953 return NULL; 1954 } 1955 1956 return machine__resolve_kernel_addr(vmachine, addrp, modp); 1957 } 1958 1959 static int trace__symbols_init(struct trace *trace, struct evlist *evlist) 1960 { 1961 int err = symbol__init(NULL); 1962 1963 if (err) 1964 return err; 1965 1966 trace->host = machine__new_host(); 1967 if (trace->host == NULL) 1968 return -ENOMEM; 1969 1970 thread__set_priv_destructor(thread_trace__delete); 1971 1972 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr); 1973 if (err < 0) 1974 goto out; 1975 1976 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, 1977 evlist->core.threads, trace__tool_process, 1978 true, false, 1); 1979 out: 1980 if (err) 1981 symbol__exit(); 1982 1983 return err; 1984 } 1985 1986 static void trace__symbols__exit(struct trace *trace) 1987 { 1988 
machine__exit(trace->host); 1989 trace->host = NULL; 1990 1991 symbol__exit(); 1992 } 1993 1994 static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args) 1995 { 1996 int idx; 1997 1998 if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0) 1999 nr_args = sc->fmt->nr_args; 2000 2001 sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt)); 2002 if (sc->arg_fmt == NULL) 2003 return -1; 2004 2005 for (idx = 0; idx < nr_args; ++idx) { 2006 if (sc->fmt) 2007 sc->arg_fmt[idx] = sc->fmt->arg[idx]; 2008 } 2009 2010 sc->nr_args = nr_args; 2011 return 0; 2012 } 2013 2014 static const struct syscall_arg_fmt syscall_arg_fmts__by_name[] = { 2015 { .name = "msr", .scnprintf = SCA_X86_MSR, .strtoul = STUL_X86_MSR, }, 2016 { .name = "vector", .scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, }, 2017 }; 2018 2019 static int syscall_arg_fmt__cmp(const void *name, const void *fmtp) 2020 { 2021 const struct syscall_arg_fmt *fmt = fmtp; 2022 return strcmp(name, fmt->name); 2023 } 2024 2025 static const struct syscall_arg_fmt * 2026 __syscall_arg_fmt__find_by_name(const struct syscall_arg_fmt *fmts, const int nmemb, 2027 const char *name) 2028 { 2029 return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp); 2030 } 2031 2032 static const struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name) 2033 { 2034 const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name); 2035 return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name); 2036 } 2037 2038 static struct tep_format_field * 2039 syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field, 2040 bool *use_btf) 2041 { 2042 struct tep_format_field *last_field = NULL; 2043 int len; 2044 2045 for (; field; field = field->next, ++arg) { 2046 last_field = field; 2047 2048 if (arg->scnprintf) 2049 continue; 2050 2051 len = strlen(field->name); 2052 2053 // As far as heuristics (or intention) goes this seems to hold true, and makes sense! 
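		// E.g. (illustrative): openat's "const char *filename" is data the
		// kernel reads _from_ user space, while a non-const pointer like
		// read's "char *buf" is a destination the kernel writes back to.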
		if ((field->flags & TEP_FIELD_IS_POINTER) && strstarts(field->type, "const "))
			arg->from_user = true;

		if (strcmp(field->type, "const char *") == 0 &&
		    ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
		     strstr(field->name, "path") != NULL)) {
			arg->scnprintf = SCA_FILENAME;
		} else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
			arg->scnprintf = SCA_PTR;
		else if (strcmp(field->type, "pid_t") == 0)
			arg->scnprintf = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			arg->scnprintf = SCA_MODE_T;
		else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) {
			arg->scnprintf = SCA_CHAR_ARRAY;
			arg->nr_entries = field->arraylen;
		} else if ((strcmp(field->type, "int") == 0 ||
			    strcmp(field->type, "unsigned int") == 0 ||
			    strcmp(field->type, "long") == 0) &&
			   len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
			/*
			 * /sys/kernel/tracing/events/syscalls/sys_enter*
			 * grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
			 * 65 int
			 * 23 unsigned int
			 * 7 unsigned long
			 */
			arg->scnprintf = SCA_FD;
		} else if (strstr(field->type, "enum") && use_btf != NULL) {
			*use_btf = true;
			arg->strtoul = STUL_BTF_TYPE;
		} else {
			const struct syscall_arg_fmt *fmt =
				syscall_arg_fmt__find_by_name(field->name);

			if (fmt) {
				arg->scnprintf = fmt->scnprintf;
				arg->strtoul = fmt->strtoul;
			}
		}
	}

	return last_field;
}

static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args,
									  &sc->use_btf);

	if (last_field)
		sc->args_size = last_field->offset + last_field->size;

	return 0;
}

static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = syscalltbl__name(trace->sctbl, id);
	int err;

	if (trace->syscalls.table == NULL) {
		trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
		if (trace->syscalls.table == NULL)
			return -ENOMEM;
	}
	sc = trace->syscalls.table + id;
	if (sc->nonexistent)
		return -EEXIST;

	if (name == NULL) {
		sc->nonexistent = true;
		return -EEXIST;
	}

	sc->name = name;
	sc->fmt = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
	}

	/*
	 * If we fail to read the tracepoint format via its sysfs node, then
	 * the tracepoint doesn't exist, so set the 'nonexistent' flag.
	 */
	if (IS_ERR(sc->tp_format)) {
		sc->nonexistent = true;
		return PTR_ERR(sc->tp_format);
	}

	/*
	 * The tracepoint format contains a __syscall_nr field, so it has one
	 * field more than the actual number of syscall arguments.
	 */
	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
					RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields - 1))
		return -ENOMEM;

	sc->args = sc->tp_format->format.fields;
	/*
	 * We need to check and discard the first field, '__syscall_nr' or
	 * 'nr', which holds the syscall number and is not a real argument,
	 * so it is not needed here ('nr' is the name used by older kernels).
	 */
	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
		sc->args = sc->args->next;
		--sc->nr_args;
	}

	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
	sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");

	err = syscall__set_arg_fmts(sc);

	/* after calling syscall__set_arg_fmts() we'll know whether use_btf is true */
	if (sc->use_btf)
		trace__load_vmlinux_btf(trace);

	return err;
}

static int evsel__init_tp_arg_scnprintf(struct evsel *evsel, bool *use_btf)
{
	struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);

	if (fmt != NULL) {
		const struct tep_event *tp_format = evsel__tp_format(evsel);

		if (tp_format) {
			syscall_arg_fmt__init_array(fmt, tp_format->format.fields, use_btf);
			return 0;
		}
	}

	return -ENOMEM;
}

static int intcmp(const void *a, const void *b)
{
	const int *one = a, *another = b;

	return *one - *another;
}

static int trace__validate_ev_qualifier(struct trace *trace)
{
	int err = 0;
	bool printed_invalid_prefix = false;
	struct str_node *pos;
	size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);

	trace->ev_qualifier_ids.entries = malloc(nr_allocated *
						 sizeof(trace->ev_qualifier_ids.entries[0]));

	if (trace->ev_qualifier_ids.entries == NULL) {
		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
		      trace->output);
		err = -EINVAL;
		goto out;
	}

	strlist__for_each_entry(pos, trace->ev_qualifier) {
		const char *sc = pos->s;
		int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;

		if (id < 0) {
			id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
			if (id >= 0)
				goto matches;

			if (!printed_invalid_prefix) {
				pr_debug("Skipping unknown syscalls: ");
				printed_invalid_prefix = true;
			} else {
				pr_debug(", ");
			}

			pr_debug("%s", sc);
			continue;
		}
matches:
		trace->ev_qualifier_ids.entries[nr_used++] = id;
		if (match_next == -1)
			continue;

		while (1) {
			id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
			if (id < 0)
				break;
			if (nr_allocated == nr_used) {
				void *entries;

				nr_allocated += 8;
				entries = realloc(trace->ev_qualifier_ids.entries,
						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
				if (entries == NULL) {
					err = -ENOMEM;
					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
					goto out_free;
				}
				trace->ev_qualifier_ids.entries = entries;
			}
			trace->ev_qualifier_ids.entries[nr_used++] = id;
		}
	}

	trace->ev_qualifier_ids.nr = nr_used;
	qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
out:
	if (printed_invalid_prefix)
		pr_debug("\n");
	return err;
out_free:
	zfree(&trace->ev_qualifier_ids.entries);
	trace->ev_qualifier_ids.nr = 0;
	goto out;
}

static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
{
	bool in_ev_qualifier;

	if (trace->ev_qualifier_ids.nr == 0)
		return true;

	in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
				  trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;

	if (in_ev_qualifier)
		return !trace->not_ev_qualifier;

	return trace->not_ev_qualifier;
}

/*
 * args is to be interpreted as a series of longs but we need to handle
 * 8-byte unaligned accesses. args points to raw_data within the event
 * and raw_data is not guaranteed to be 8-byte aligned because it is
 * preceded by raw_size, which is a u32. So we need to copy args to a temp
 * variable to read it. Most notably this avoids extended load instructions
 * on unaligned addresses.
 */
unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
{
	unsigned long val;
	unsigned char *p = arg->args + sizeof(unsigned long) * idx;

	memcpy(&val, p, sizeof(val));
	return val;
}

static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
				      struct syscall_arg *arg)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
		return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);

	return scnprintf(bf, size, "arg%d: ", arg->idx);
}

/*
 * Check if the value is in fact zero, i.e. mask whatever needs masking, such
 * as the mount 'flags' argument that needs ignoring some magic flags, see the
 * comment in tools/perf/trace/beauty/mount_flags.c
 */
static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val)
{
	if (fmt && fmt->mask_val)
		return fmt->mask_val(arg, val);

	return val;
}

static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size,
					     struct syscall_arg *arg, unsigned long val)
{
	if (fmt && fmt->scnprintf) {
		arg->val = val;
		if (fmt->parm)
			arg->parm = fmt->parm;
		return fmt->scnprintf(bf, size, arg);
	}
	return scnprintf(bf, size, "%ld", val);
}

static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned char *args, void *augmented_args, int augmented_args_size,
				      struct trace *trace, struct thread *thread)
{
	size_t printed = 0, btf_printed;
	unsigned long val;
	u8 bit = 1;
	struct syscall_arg arg = {
		.args	= args,
		.augmented = {
			.size = augmented_args_size,
			.args = augmented_args,
		},
		.idx	= 0,
		.mask	= 0,
		.trace  = trace,
		.thread = thread,
		.show_string_prefix = trace->show_string_prefix,
	};
	struct thread_trace *ttrace = thread__priv(thread);
	void *default_scnprintf;

	/*
	 * Things like fcntl will set this in its 'cmd' formatter to pick the
	 * right formatter for the return value (an fd? file flags?), which is
	 * not needed for syscalls that always return a given type, say an fd.
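	 *
	 * For instance (an illustrative sketch, not actual tool output):
	 * fcntl(3, F_DUPFD, 0) wants its return shown as an fd, while
	 * fcntl(3, F_GETFL) wants it decoded as file flags, so the 'cmd'
	 * beautifier stashes the matching return formatter here.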
2372 */ 2373 ttrace->ret_scnprintf = NULL; 2374 2375 if (sc->args != NULL) { 2376 struct tep_format_field *field; 2377 2378 for (field = sc->args; field; 2379 field = field->next, ++arg.idx, bit <<= 1) { 2380 if (arg.mask & bit) 2381 continue; 2382 2383 arg.fmt = &sc->arg_fmt[arg.idx]; 2384 val = syscall_arg__val(&arg, arg.idx); 2385 /* 2386 * Some syscall args need some mask, most don't and 2387 * return val untouched. 2388 */ 2389 val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val); 2390 2391 /* 2392 * Suppress this argument if its value is zero and show_zero 2393 * property isn't set. 2394 * 2395 * If it has a BTF type, then override the zero suppression knob 2396 * as the common case is for zero in an enum to have an associated entry. 2397 */ 2398 if (val == 0 && !trace->show_zeros && 2399 !(sc->arg_fmt && sc->arg_fmt[arg.idx].show_zero) && 2400 !(sc->arg_fmt && sc->arg_fmt[arg.idx].strtoul == STUL_BTF_TYPE)) 2401 continue; 2402 2403 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); 2404 2405 if (trace->show_arg_names) 2406 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); 2407 2408 default_scnprintf = sc->arg_fmt[arg.idx].scnprintf; 2409 2410 if (trace->force_btf || default_scnprintf == NULL || default_scnprintf == SCA_PTR) { 2411 btf_printed = trace__btf_scnprintf(trace, &arg, bf + printed, 2412 size - printed, val, field->type); 2413 if (btf_printed) { 2414 printed += btf_printed; 2415 continue; 2416 } 2417 } 2418 2419 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], 2420 bf + printed, size - printed, &arg, val); 2421 } 2422 } else if (IS_ERR(sc->tp_format)) { 2423 /* 2424 * If we managed to read the tracepoint /format file, then we 2425 * may end up not having any args, like with gettid(), so only 2426 * print the raw args when we didn't manage to read it. 2427 */ 2428 while (arg.idx < sc->nr_args) { 2429 if (arg.mask & bit) 2430 goto next_arg; 2431 val = syscall_arg__val(&arg, arg.idx); 2432 if (printed) 2433 printed += scnprintf(bf + printed, size - printed, ", "); 2434 printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg); 2435 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val); 2436 next_arg: 2437 ++arg.idx; 2438 bit <<= 1; 2439 } 2440 } 2441 2442 return printed; 2443 } 2444 2445 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel, 2446 union perf_event *event, 2447 struct perf_sample *sample); 2448 2449 static struct syscall *trace__syscall_info(struct trace *trace, 2450 struct evsel *evsel, int id) 2451 { 2452 int err = 0; 2453 2454 if (id < 0) { 2455 2456 /* 2457 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried 2458 * before that, leaving at a higher verbosity level till that is 2459 * explained. Reproduced with plain ftrace with: 2460 * 2461 * echo 1 > /t/events/raw_syscalls/sys_exit/enable 2462 * grep "NR -1 " /t/trace_pipe 2463 * 2464 * After generating some load on the machine. 
2465 */ 2466 if (verbose > 1) { 2467 static u64 n; 2468 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n", 2469 id, evsel__name(evsel), ++n); 2470 } 2471 return NULL; 2472 } 2473 2474 err = -EINVAL; 2475 2476 if (id > trace->sctbl->syscalls.max_id) { 2477 goto out_cant_read; 2478 } 2479 2480 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) && 2481 (err = trace__read_syscall_info(trace, id)) != 0) 2482 goto out_cant_read; 2483 2484 if (trace->syscalls.table && trace->syscalls.table[id].nonexistent) 2485 goto out_cant_read; 2486 2487 return &trace->syscalls.table[id]; 2488 2489 out_cant_read: 2490 if (verbose > 0) { 2491 char sbuf[STRERR_BUFSIZE]; 2492 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf))); 2493 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL) 2494 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name); 2495 fputs(" information\n", trace->output); 2496 } 2497 return NULL; 2498 } 2499 2500 struct syscall_stats { 2501 struct stats stats; 2502 u64 nr_failures; 2503 int max_errno; 2504 u32 *errnos; 2505 }; 2506 2507 static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace, 2508 int id, struct perf_sample *sample, long err, 2509 struct trace *trace) 2510 { 2511 struct hashmap *syscall_stats = ttrace->syscall_stats; 2512 struct syscall_stats *stats = NULL; 2513 u64 duration = 0; 2514 2515 if (trace->summary_mode == SUMMARY__BY_TOTAL) 2516 syscall_stats = trace->syscall_stats; 2517 2518 if (!hashmap__find(syscall_stats, id, &stats)) { 2519 stats = zalloc(sizeof(*stats)); 2520 if (stats == NULL) 2521 return; 2522 2523 init_stats(&stats->stats); 2524 if (hashmap__add(syscall_stats, id, stats) < 0) { 2525 free(stats); 2526 return; 2527 } 2528 } 2529 2530 if (ttrace->entry_time && sample->time > ttrace->entry_time) 2531 duration = sample->time - ttrace->entry_time; 2532 2533 update_stats(&stats->stats, duration); 2534 2535 if (err < 0) { 2536 ++stats->nr_failures; 2537 2538 if (!trace->errno_summary) 2539 return; 2540 2541 err = -err; 2542 if (err > stats->max_errno) { 2543 u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32)); 2544 2545 if (new_errnos) { 2546 memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32)); 2547 } else { 2548 pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n", 2549 thread__comm_str(thread), thread__pid(thread), 2550 thread__tid(thread)); 2551 return; 2552 } 2553 2554 stats->errnos = new_errnos; 2555 stats->max_errno = err; 2556 } 2557 2558 ++stats->errnos[err - 1]; 2559 } 2560 } 2561 2562 static int trace__printf_interrupted_entry(struct trace *trace) 2563 { 2564 struct thread_trace *ttrace; 2565 size_t printed; 2566 int len; 2567 2568 if (trace->failure_only || trace->current == NULL) 2569 return 0; 2570 2571 ttrace = thread__priv(trace->current); 2572 2573 if (!ttrace->entry_pending) 2574 return 0; 2575 2576 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output); 2577 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str); 2578 2579 if (len < trace->args_alignment - 4) 2580 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " "); 2581 2582 printed += fprintf(trace->output, " ...\n"); 2583 2584 ttrace->entry_pending = false; 2585 ++trace->nr_events_printed; 2586 2587 return printed; 2588 } 2589 2590 static int 
trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
		      struct perf_sample *sample, struct thread *thread)
{
	int printed = 0;

	if (trace->print_sample) {
		double ts = (double)sample->time / NSEC_PER_MSEC;

		printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
				   evsel__name(evsel), ts,
				   thread__comm_str(thread),
				   sample->pid, sample->tid, sample->cpu);
	}

	return printed;
}

static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
{
	/*
	 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
	 * and there we get all 6 syscall args plus the tracepoint common fields
	 * that get calculated at the start and the syscall_nr (another long).
	 * So we check if that is the case and if so don't look after the
	 * sc->args_size but always after the full raw_syscalls:sys_enter payload,
	 * which is fixed.
	 *
	 * We'll revisit this later to pass sc->args_size to the BPF augmenter
	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
	 * copies only what we need for each syscall, like what happens when we
	 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
	 * traffic to just what is needed for each syscall.
	 */
	int args_size = raw_augmented_args_size ?: sc->args_size;

	*augmented_args_size = sample->raw_size - args_size;
	if (*augmented_args_size > 0) {
		static uintptr_t argbuf[1024]; /* assuming single-threaded */

		if ((size_t)(*augmented_args_size) > sizeof(argbuf))
			return NULL;

		/*
		 * The perf ring-buffer is 8-byte aligned but sample->raw_data
		 * is not, because it's preceded by a u32 size. Later, the
		 * beautifiers will use the augmented args with stricter
		 * alignments, like in some struct. To make sure it's aligned,
		 * let's copy the args into a static buffer, as it's
		 * single-threaded for now.
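		 *
		 * A rough sketch of the layout (assumed, for illustration):
		 *
		 *   sample->raw_data: [ sys_enter payload: args_size bytes ][ augmented args... ]
		 *                                                            `-> copied into argbuf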
		 */
		memcpy(argbuf, sample->raw_data + args_size, *augmented_args_size);

		return argbuf;
	}
	return NULL;
}

static void syscall__exit(struct syscall *sc)
{
	if (!sc)
		return;

	zfree(&sc->arg_fmt);
}

static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	int printed = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	int augmented_args_size = 0;
	void *augmented_args = NULL;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)
			goto out_put;
	}

	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
		trace__printf_interrupted_entry(trace);
	/*
	 * If this is raw_syscalls:sys_enter, then it always comes with the 6
	 * possible arguments, even if the syscall being handled, say "openat",
	 * uses only 4 of them. This breaks the syscall__augmented_args() check
	 * for augmented args, as we calculate syscall->args_size from each
	 * syscalls:sys_enter_NAME tracefs format file. So when handling, say,
	 * the openat syscall, we end up getting 6 args for the
	 * raw_syscalls:sys_enter event when we expected just 4, and mistakenly
	 * take the extra 2 u64 args as the augmented filename. So just check
	 * here and avoid using augmented syscalls when the evsel is the
	 * raw_syscalls one.
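	 *
	 * E.g. (sizes here are illustrative): with openat's 4 args described
	 * in syscalls:sys_enter_openat's format, args_size falls 2 x u64 short
	 * of the fixed 6-arg raw_syscalls:sys_enter payload, and those
	 * trailing 16 bytes would be misread as an augmented string payload.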
	 */
	if (evsel != trace->syscalls.events.sys_enter)
		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, augmented_args, augmented_args_size, trace, thread);

	if (sc->is_exit) {
		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
			int alignment = 0;

			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
			printed = fprintf(trace->output, "%s)", ttrace->entry_str);
			if (trace->args_alignment > printed)
				alignment = trace->args_alignment - printed;
			fprintf(trace->output, "%*s= ?\n", alignment, " ");
		}
	} else {
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;
	}

	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);
	}
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample)
{
	struct thread_trace *ttrace;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	char msg[1024];
	void *args, *augmented_args = NULL;
	int augmented_args_size;
	size_t printed = 0;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace);
	/*
	 * We need to get ttrace just to make sure it is there when
	 * syscall__scnprintf_args() and the rest of the beautifiers access it
	 * via struct syscall_arg.
	 */
	if (ttrace == NULL)
		goto out_put;

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
	augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	printed += syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
	fprintf(trace->output, "%.*s", (int)printed, msg);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample,
				    struct callchain_cursor *cursor)
{
	struct addr_location al;
	int max_stack = evsel->core.attr.sample_max_stack ?
2773 evsel->core.attr.sample_max_stack : 2774 trace->max_stack; 2775 int err = -1; 2776 2777 addr_location__init(&al); 2778 if (machine__resolve(trace->host, &al, sample) < 0) 2779 goto out; 2780 2781 err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack); 2782 out: 2783 addr_location__exit(&al); 2784 return err; 2785 } 2786 2787 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample) 2788 { 2789 /* TODO: user-configurable print_opts */ 2790 const unsigned int print_opts = EVSEL__PRINT_SYM | 2791 EVSEL__PRINT_DSO | 2792 EVSEL__PRINT_UNKNOWN_AS_ADDR; 2793 2794 return sample__fprintf_callchain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output); 2795 } 2796 2797 static const char *errno_to_name(struct evsel *evsel, int err) 2798 { 2799 struct perf_env *env = evsel__env(evsel); 2800 2801 return perf_env__arch_strerrno(env, err); 2802 } 2803 2804 static int trace__sys_exit(struct trace *trace, struct evsel *evsel, 2805 union perf_event *event __maybe_unused, 2806 struct perf_sample *sample) 2807 { 2808 long ret; 2809 u64 duration = 0; 2810 bool duration_calculated = false; 2811 struct thread *thread; 2812 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0; 2813 int alignment = trace->args_alignment; 2814 struct syscall *sc = trace__syscall_info(trace, evsel, id); 2815 struct thread_trace *ttrace; 2816 2817 if (sc == NULL) 2818 return -1; 2819 2820 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2821 ttrace = thread__trace(thread, trace); 2822 if (ttrace == NULL) 2823 goto out_put; 2824 2825 trace__fprintf_sample(trace, evsel, sample, thread); 2826 2827 ret = perf_evsel__sc_tp_uint(evsel, ret, sample); 2828 2829 if (trace->summary) 2830 thread__update_stats(thread, ttrace, id, sample, ret, trace); 2831 2832 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) { 2833 trace__set_fd_pathname(thread, ret, ttrace->filename.name); 2834 ttrace->filename.pending_open = false; 2835 ++trace->stats.vfs_getname; 2836 } 2837 2838 if (ttrace->entry_time) { 2839 duration = sample->time - ttrace->entry_time; 2840 if (trace__filter_duration(trace, duration)) 2841 goto out; 2842 duration_calculated = true; 2843 } else if (trace->duration_filter) 2844 goto out; 2845 2846 if (sample->callchain) { 2847 struct callchain_cursor *cursor = get_tls_callchain_cursor(); 2848 2849 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); 2850 if (callchain_ret == 0) { 2851 if (cursor->nr < trace->min_stack) 2852 goto out; 2853 callchain_ret = 1; 2854 } 2855 } 2856 2857 if (trace->summary_only || (ret >= 0 && trace->failure_only)) 2858 goto out; 2859 2860 trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output); 2861 2862 if (ttrace->entry_pending) { 2863 printed = fprintf(trace->output, "%s", ttrace->entry_str); 2864 } else { 2865 printed += fprintf(trace->output, " ... 
["); 2866 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued"); 2867 printed += 9; 2868 printed += fprintf(trace->output, "]: %s()", sc->name); 2869 } 2870 2871 printed++; /* the closing ')' */ 2872 2873 if (alignment > printed) 2874 alignment -= printed; 2875 else 2876 alignment = 0; 2877 2878 fprintf(trace->output, ")%*s= ", alignment, " "); 2879 2880 if (sc->fmt == NULL) { 2881 if (ret < 0) 2882 goto errno_print; 2883 signed_print: 2884 fprintf(trace->output, "%ld", ret); 2885 } else if (ret < 0) { 2886 errno_print: { 2887 char bf[STRERR_BUFSIZE]; 2888 const char *emsg = str_error_r(-ret, bf, sizeof(bf)), 2889 *e = errno_to_name(evsel, -ret); 2890 2891 fprintf(trace->output, "-1 %s (%s)", e, emsg); 2892 } 2893 } else if (ret == 0 && sc->fmt->timeout) 2894 fprintf(trace->output, "0 (Timeout)"); 2895 else if (ttrace->ret_scnprintf) { 2896 char bf[1024]; 2897 struct syscall_arg arg = { 2898 .val = ret, 2899 .thread = thread, 2900 .trace = trace, 2901 }; 2902 ttrace->ret_scnprintf(bf, sizeof(bf), &arg); 2903 ttrace->ret_scnprintf = NULL; 2904 fprintf(trace->output, "%s", bf); 2905 } else if (sc->fmt->hexret) 2906 fprintf(trace->output, "%#lx", ret); 2907 else if (sc->fmt->errpid) { 2908 struct thread *child = machine__find_thread(trace->host, ret, ret); 2909 2910 if (child != NULL) { 2911 fprintf(trace->output, "%ld", ret); 2912 if (thread__comm_set(child)) 2913 fprintf(trace->output, " (%s)", thread__comm_str(child)); 2914 thread__put(child); 2915 } 2916 } else 2917 goto signed_print; 2918 2919 fputc('\n', trace->output); 2920 2921 /* 2922 * We only consider an 'event' for the sake of --max-events a non-filtered 2923 * sys_enter + sys_exit and other tracepoint events. 2924 */ 2925 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX) 2926 interrupted = true; 2927 2928 if (callchain_ret > 0) 2929 trace__fprintf_callchain(trace, sample); 2930 else if (callchain_ret < 0) 2931 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 2932 out: 2933 ttrace->entry_pending = false; 2934 err = 0; 2935 out_put: 2936 thread__put(thread); 2937 return err; 2938 } 2939 2940 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel, 2941 union perf_event *event __maybe_unused, 2942 struct perf_sample *sample) 2943 { 2944 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2945 struct thread_trace *ttrace; 2946 size_t filename_len, entry_str_len, to_move; 2947 ssize_t remaining_space; 2948 char *pos; 2949 const char *filename = evsel__rawptr(evsel, sample, "pathname"); 2950 2951 if (!thread) 2952 goto out; 2953 2954 ttrace = thread__priv(thread); 2955 if (!ttrace) 2956 goto out_put; 2957 2958 filename_len = strlen(filename); 2959 if (filename_len == 0) 2960 goto out_put; 2961 2962 if (ttrace->filename.namelen < filename_len) { 2963 char *f = realloc(ttrace->filename.name, filename_len + 1); 2964 2965 if (f == NULL) 2966 goto out_put; 2967 2968 ttrace->filename.namelen = filename_len; 2969 ttrace->filename.name = f; 2970 } 2971 2972 strcpy(ttrace->filename.name, filename); 2973 ttrace->filename.pending_open = true; 2974 2975 if (!ttrace->filename.ptr) 2976 goto out_put; 2977 2978 entry_str_len = strlen(ttrace->entry_str); 2979 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */ 2980 if (remaining_space <= 0) 2981 goto out_put; 2982 2983 if (filename_len > (size_t)remaining_space) { 2984 filename += filename_len - remaining_space; 2985 filename_len = remaining_space; 2986 } 2987 
	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
	memmove(pos + filename_len, pos, to_move);
	memcpy(pos, filename, filename_len);

	ttrace->filename.ptr = 0;
	ttrace->filename.entry_str_pos = 0;
out_put:
	thread__put(thread);
out:
	return 0;
}

static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample)
{
	u64 runtime = evsel__intval(evsel, sample, "runtime");
	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
	struct thread *thread = machine__findnew_thread(trace->host,
							sample->pid,
							sample->tid);
	struct thread_trace *ttrace = thread__trace(thread, trace);

	if (ttrace == NULL)
		goto out_dump;

	ttrace->runtime_ms += runtime_ms;
	trace->runtime_ms += runtime_ms;
out_put:
	thread__put(thread);
	return 0;

out_dump:
	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 "\n",
		evsel->name,
		evsel__strval(evsel, sample, "comm"),
		(pid_t)evsel__intval(evsel, sample, "pid"),
		runtime,
		evsel__intval(evsel, sample, "vruntime"));
	goto out_put;
}

static int bpf_output__printer(enum binary_printer_ops op,
			       unsigned int val, void *extra __maybe_unused, FILE *fp)
{
	unsigned char ch = (unsigned char)val;

	switch (op) {
	case BINARY_PRINT_CHAR_DATA:
		return fprintf(fp, "%c", isprint(ch) ? ch : '.');
	case BINARY_PRINT_DATA_BEGIN:
	case BINARY_PRINT_LINE_BEGIN:
	case BINARY_PRINT_ADDR:
	case BINARY_PRINT_NUM_DATA:
	case BINARY_PRINT_NUM_PAD:
	case BINARY_PRINT_SEP:
	case BINARY_PRINT_CHAR_PAD:
	case BINARY_PRINT_LINE_END:
	case BINARY_PRINT_DATA_END:
	default:
		break;
	}

	return 0;
}

static void bpf_output__fprintf(struct trace *trace,
				struct perf_sample *sample)
{
	binary__fprintf(sample->raw_data, sample->raw_size, 8,
			bpf_output__printer, NULL, trace->output);
	++trace->nr_events_printed;
}

static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
				       struct thread *thread, void *augmented_args, int augmented_args_size)
{
	char bf[2048];
	size_t size = sizeof(bf);
	const struct tep_event *tp_format = evsel__tp_format(evsel);
	struct tep_format_field *field = tp_format ?
tp_format->format.fields : NULL; 3070 struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel); 3071 size_t printed = 0, btf_printed; 3072 unsigned long val; 3073 u8 bit = 1; 3074 struct syscall_arg syscall_arg = { 3075 .augmented = { 3076 .size = augmented_args_size, 3077 .args = augmented_args, 3078 }, 3079 .idx = 0, 3080 .mask = 0, 3081 .trace = trace, 3082 .thread = thread, 3083 .show_string_prefix = trace->show_string_prefix, 3084 }; 3085 3086 for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) { 3087 if (syscall_arg.mask & bit) 3088 continue; 3089 3090 syscall_arg.len = 0; 3091 syscall_arg.fmt = arg; 3092 if (field->flags & TEP_FIELD_IS_ARRAY) { 3093 int offset = field->offset; 3094 3095 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 3096 offset = format_field__intval(field, sample, evsel->needs_swap); 3097 syscall_arg.len = offset >> 16; 3098 offset &= 0xffff; 3099 if (tep_field_is_relative(field->flags)) 3100 offset += field->offset + field->size; 3101 } 3102 3103 val = (uintptr_t)(sample->raw_data + offset); 3104 } else 3105 val = format_field__intval(field, sample, evsel->needs_swap); 3106 /* 3107 * Some syscall args need some mask, most don't and 3108 * return val untouched. 3109 */ 3110 val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val); 3111 3112 /* Suppress this argument if its value is zero and show_zero property isn't set. */ 3113 if (val == 0 && !trace->show_zeros && !arg->show_zero && arg->strtoul != STUL_BTF_TYPE) 3114 continue; 3115 3116 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); 3117 3118 if (trace->show_arg_names) 3119 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); 3120 3121 btf_printed = trace__btf_scnprintf(trace, &syscall_arg, bf + printed, size - printed, val, field->type); 3122 if (btf_printed) { 3123 printed += btf_printed; 3124 continue; 3125 } 3126 3127 printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val); 3128 } 3129 3130 return fprintf(trace->output, "%.*s", (int)printed, bf); 3131 } 3132 3133 static int trace__event_handler(struct trace *trace, struct evsel *evsel, 3134 union perf_event *event __maybe_unused, 3135 struct perf_sample *sample) 3136 { 3137 struct thread *thread; 3138 int callchain_ret = 0; 3139 3140 if (evsel->nr_events_printed >= evsel->max_events) 3141 return 0; 3142 3143 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 3144 3145 if (sample->callchain) { 3146 struct callchain_cursor *cursor = get_tls_callchain_cursor(); 3147 3148 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); 3149 if (callchain_ret == 0) { 3150 if (cursor->nr < trace->min_stack) 3151 goto out; 3152 callchain_ret = 1; 3153 } 3154 } 3155 3156 trace__printf_interrupted_entry(trace); 3157 trace__fprintf_tstamp(trace, sample->time, trace->output); 3158 3159 if (trace->trace_syscalls && trace->show_duration) 3160 fprintf(trace->output, "( ): "); 3161 3162 if (thread) 3163 trace__fprintf_comm_tid(trace, thread, trace->output); 3164 3165 if (evsel == trace->syscalls.events.bpf_output) { 3166 int id = perf_evsel__sc_tp_uint(evsel, id, sample); 3167 struct syscall *sc = trace__syscall_info(trace, evsel, id); 3168 3169 if (sc) { 3170 fprintf(trace->output, "%s(", sc->name); 3171 trace__fprintf_sys_enter(trace, evsel, sample); 3172 fputc(')', trace->output); 3173 goto newline; 3174 } 3175 3176 /* 3177 * XXX: Not having the associated syscall info or not finding/adding 3178 * the thread should never 
happen, but if it does... 3179 * fall thru and print it as a bpf_output event. 3180 */ 3181 } 3182 3183 fprintf(trace->output, "%s(", evsel->name); 3184 3185 if (evsel__is_bpf_output(evsel)) { 3186 bpf_output__fprintf(trace, sample); 3187 } else { 3188 const struct tep_event *tp_format = evsel__tp_format(evsel); 3189 3190 if (tp_format && (strncmp(tp_format->name, "sys_enter_", 10) || 3191 trace__fprintf_sys_enter(trace, evsel, sample))) { 3192 if (trace->libtraceevent_print) { 3193 event_format__fprintf(tp_format, sample->cpu, 3194 sample->raw_data, sample->raw_size, 3195 trace->output); 3196 } else { 3197 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0); 3198 } 3199 } 3200 } 3201 3202 newline: 3203 fprintf(trace->output, ")\n"); 3204 3205 if (callchain_ret > 0) 3206 trace__fprintf_callchain(trace, sample); 3207 else if (callchain_ret < 0) 3208 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 3209 3210 ++trace->nr_events_printed; 3211 3212 if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) { 3213 evsel__disable(evsel); 3214 evsel__close(evsel); 3215 } 3216 out: 3217 thread__put(thread); 3218 return 0; 3219 } 3220 3221 static void print_location(FILE *f, struct perf_sample *sample, 3222 struct addr_location *al, 3223 bool print_dso, bool print_sym) 3224 { 3225 3226 if ((verbose > 0 || print_dso) && al->map) 3227 fprintf(f, "%s@", dso__long_name(map__dso(al->map))); 3228 3229 if ((verbose > 0 || print_sym) && al->sym) 3230 fprintf(f, "%s+0x%" PRIx64, al->sym->name, 3231 al->addr - al->sym->start); 3232 else if (al->map) 3233 fprintf(f, "0x%" PRIx64, al->addr); 3234 else 3235 fprintf(f, "0x%" PRIx64, sample->addr); 3236 } 3237 3238 static int trace__pgfault(struct trace *trace, 3239 struct evsel *evsel, 3240 union perf_event *event __maybe_unused, 3241 struct perf_sample *sample) 3242 { 3243 struct thread *thread; 3244 struct addr_location al; 3245 char map_type = 'd'; 3246 struct thread_trace *ttrace; 3247 int err = -1; 3248 int callchain_ret = 0; 3249 3250 addr_location__init(&al); 3251 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 3252 3253 if (sample->callchain) { 3254 struct callchain_cursor *cursor = get_tls_callchain_cursor(); 3255 3256 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); 3257 if (callchain_ret == 0) { 3258 if (cursor->nr < trace->min_stack) 3259 goto out_put; 3260 callchain_ret = 1; 3261 } 3262 } 3263 3264 ttrace = thread__trace(thread, trace); 3265 if (ttrace == NULL) 3266 goto out_put; 3267 3268 if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ) { 3269 ttrace->pfmaj++; 3270 trace->pfmaj++; 3271 } else { 3272 ttrace->pfmin++; 3273 trace->pfmin++; 3274 } 3275 3276 if (trace->summary_only) 3277 goto out; 3278 3279 thread__find_symbol(thread, sample->cpumode, sample->ip, &al); 3280 3281 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output); 3282 3283 fprintf(trace->output, "%sfault [", 3284 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ? 
3285 "maj" : "min"); 3286 3287 print_location(trace->output, sample, &al, false, true); 3288 3289 fprintf(trace->output, "] => "); 3290 3291 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); 3292 3293 if (!al.map) { 3294 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); 3295 3296 if (al.map) 3297 map_type = 'x'; 3298 else 3299 map_type = '?'; 3300 } 3301 3302 print_location(trace->output, sample, &al, true, false); 3303 3304 fprintf(trace->output, " (%c%c)\n", map_type, al.level); 3305 3306 if (callchain_ret > 0) 3307 trace__fprintf_callchain(trace, sample); 3308 else if (callchain_ret < 0) 3309 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 3310 3311 ++trace->nr_events_printed; 3312 out: 3313 err = 0; 3314 out_put: 3315 thread__put(thread); 3316 addr_location__exit(&al); 3317 return err; 3318 } 3319 3320 static void trace__set_base_time(struct trace *trace, 3321 struct evsel *evsel, 3322 struct perf_sample *sample) 3323 { 3324 /* 3325 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust 3326 * and don't use sample->time unconditionally, we may end up having 3327 * some other event in the future without PERF_SAMPLE_TIME for good 3328 * reason, i.e. we may not be interested in its timestamps, just in 3329 * it taking place, picking some piece of information when it 3330 * appears in our event stream (vfs_getname comes to mind). 3331 */ 3332 if (trace->base_time == 0 && !trace->full_time && 3333 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME)) 3334 trace->base_time = sample->time; 3335 } 3336 3337 static int trace__process_sample(const struct perf_tool *tool, 3338 union perf_event *event, 3339 struct perf_sample *sample, 3340 struct evsel *evsel, 3341 struct machine *machine __maybe_unused) 3342 { 3343 struct trace *trace = container_of(tool, struct trace, tool); 3344 struct thread *thread; 3345 int err = 0; 3346 3347 tracepoint_handler handler = evsel->handler; 3348 3349 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 3350 if (thread && thread__is_filtered(thread)) 3351 goto out; 3352 3353 trace__set_base_time(trace, evsel, sample); 3354 3355 if (handler) { 3356 ++trace->nr_events; 3357 handler(trace, evsel, event, sample); 3358 } 3359 out: 3360 thread__put(thread); 3361 return err; 3362 } 3363 3364 static int trace__record(struct trace *trace, int argc, const char **argv) 3365 { 3366 unsigned int rec_argc, i, j; 3367 const char **rec_argv; 3368 const char * const record_args[] = { 3369 "record", 3370 "-R", 3371 "-m", "1024", 3372 "-c", "1", 3373 }; 3374 pid_t pid = getpid(); 3375 char *filter = asprintf__tp_filter_pids(1, &pid); 3376 const char * const sc_args[] = { "-e", }; 3377 unsigned int sc_args_nr = ARRAY_SIZE(sc_args); 3378 const char * const majpf_args[] = { "-e", "major-faults" }; 3379 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args); 3380 const char * const minpf_args[] = { "-e", "minor-faults" }; 3381 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args); 3382 int err = -1; 3383 3384 /* +3 is for the event string below and the pid filter */ 3385 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 + 3386 majpf_args_nr + minpf_args_nr + argc; 3387 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 3388 3389 if (rec_argv == NULL || filter == NULL) 3390 goto out_free; 3391 3392 j = 0; 3393 for (i = 0; i < ARRAY_SIZE(record_args); i++) 3394 rec_argv[j++] = record_args[i]; 3395 3396 if (trace->trace_syscalls) { 3397 for (i = 0; i < sc_args_nr; i++) 3398 rec_argv[j++] = sc_args[i]; 3399 
3400 /* event string may be different for older kernels - e.g., RHEL6 */ 3401 if (is_valid_tracepoint("raw_syscalls:sys_enter")) 3402 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit"; 3403 else if (is_valid_tracepoint("syscalls:sys_enter")) 3404 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit"; 3405 else { 3406 pr_err("Neither raw_syscalls nor syscalls events exist.\n"); 3407 goto out_free; 3408 } 3409 } 3410 3411 rec_argv[j++] = "--filter"; 3412 rec_argv[j++] = filter; 3413 3414 if (trace->trace_pgfaults & TRACE_PFMAJ) 3415 for (i = 0; i < majpf_args_nr; i++) 3416 rec_argv[j++] = majpf_args[i]; 3417 3418 if (trace->trace_pgfaults & TRACE_PFMIN) 3419 for (i = 0; i < minpf_args_nr; i++) 3420 rec_argv[j++] = minpf_args[i]; 3421 3422 for (i = 0; i < (unsigned int)argc; i++) 3423 rec_argv[j++] = argv[i]; 3424 3425 err = cmd_record(j, rec_argv); 3426 out_free: 3427 free(filter); 3428 free(rec_argv); 3429 return err; 3430 } 3431 3432 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp); 3433 static size_t trace__fprintf_total_summary(struct trace *trace, FILE *fp); 3434 3435 static bool evlist__add_vfs_getname(struct evlist *evlist) 3436 { 3437 bool found = false; 3438 struct evsel *evsel, *tmp; 3439 struct parse_events_error err; 3440 int ret; 3441 3442 parse_events_error__init(&err); 3443 ret = parse_events(evlist, "probe:vfs_getname*", &err); 3444 parse_events_error__exit(&err); 3445 if (ret) 3446 return false; 3447 3448 evlist__for_each_entry_safe(evlist, evsel, tmp) { 3449 if (!strstarts(evsel__name(evsel), "probe:vfs_getname")) 3450 continue; 3451 3452 if (evsel__field(evsel, "pathname")) { 3453 evsel->handler = trace__vfs_getname; 3454 found = true; 3455 continue; 3456 } 3457 3458 list_del_init(&evsel->core.node); 3459 evsel->evlist = NULL; 3460 evsel__delete(evsel); 3461 } 3462 3463 return found; 3464 } 3465 3466 static struct evsel *evsel__new_pgfault(u64 config) 3467 { 3468 struct evsel *evsel; 3469 struct perf_event_attr attr = { 3470 .type = PERF_TYPE_SOFTWARE, 3471 .mmap_data = 1, 3472 }; 3473 3474 attr.config = config; 3475 attr.sample_period = 1; 3476 3477 event_attr_init(&attr); 3478 3479 evsel = evsel__new(&attr); 3480 if (evsel) 3481 evsel->handler = trace__pgfault; 3482 3483 return evsel; 3484 } 3485 3486 static void evlist__free_syscall_tp_fields(struct evlist *evlist) 3487 { 3488 struct evsel *evsel; 3489 3490 evlist__for_each_entry(evlist, evsel) { 3491 evsel_trace__delete(evsel->priv); 3492 evsel->priv = NULL; 3493 } 3494 } 3495 3496 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample) 3497 { 3498 const u32 type = event->header.type; 3499 struct evsel *evsel; 3500 3501 if (type != PERF_RECORD_SAMPLE) { 3502 trace__process_event(trace, trace->host, event, sample); 3503 return; 3504 } 3505 3506 evsel = evlist__id2evsel(trace->evlist, sample->id); 3507 if (evsel == NULL) { 3508 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id); 3509 return; 3510 } 3511 3512 if (evswitch__discard(&trace->evswitch, evsel)) 3513 return; 3514 3515 trace__set_base_time(trace, evsel, sample); 3516 3517 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && 3518 sample->raw_data == NULL) { 3519 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n", 3520 evsel__name(evsel), sample->tid, 3521 sample->cpu, sample->raw_size); 3522 } else { 3523 tracepoint_handler handler = evsel->handler; 3524 handler(trace, evsel, event, sample); 3525 } 
3526 3527 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX) 3528 interrupted = true; 3529 } 3530 3531 static int trace__add_syscall_newtp(struct trace *trace) 3532 { 3533 int ret = -1; 3534 struct evlist *evlist = trace->evlist; 3535 struct evsel *sys_enter, *sys_exit; 3536 3537 sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter); 3538 if (sys_enter == NULL) 3539 goto out; 3540 3541 if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args)) 3542 goto out_delete_sys_enter; 3543 3544 sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit); 3545 if (sys_exit == NULL) 3546 goto out_delete_sys_enter; 3547 3548 if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret)) 3549 goto out_delete_sys_exit; 3550 3551 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param); 3552 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param); 3553 3554 evlist__add(evlist, sys_enter); 3555 evlist__add(evlist, sys_exit); 3556 3557 if (callchain_param.enabled && !trace->kernel_syscallchains) { 3558 /* 3559 * We're interested only in the user space callchain 3560 * leading to the syscall, allow overriding that for 3561 * debugging reasons using --kernel_syscall_callchains 3562 */ 3563 sys_exit->core.attr.exclude_callchain_kernel = 1; 3564 } 3565 3566 trace->syscalls.events.sys_enter = sys_enter; 3567 trace->syscalls.events.sys_exit = sys_exit; 3568 3569 ret = 0; 3570 out: 3571 return ret; 3572 3573 out_delete_sys_exit: 3574 evsel__delete_priv(sys_exit); 3575 out_delete_sys_enter: 3576 evsel__delete_priv(sys_enter); 3577 goto out; 3578 } 3579 3580 static int trace__set_ev_qualifier_tp_filter(struct trace *trace) 3581 { 3582 int err = -1; 3583 struct evsel *sys_exit; 3584 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier, 3585 trace->ev_qualifier_ids.nr, 3586 trace->ev_qualifier_ids.entries); 3587 3588 if (filter == NULL) 3589 goto out_enomem; 3590 3591 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) { 3592 sys_exit = trace->syscalls.events.sys_exit; 3593 err = evsel__append_tp_filter(sys_exit, filter); 3594 } 3595 3596 free(filter); 3597 out: 3598 return err; 3599 out_enomem: 3600 errno = ENOMEM; 3601 goto out; 3602 } 3603 3604 #ifdef HAVE_BPF_SKEL 3605 static int syscall_arg_fmt__cache_btf_struct(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type) 3606 { 3607 int id; 3608 3609 if (arg_fmt->type != NULL) 3610 return -1; 3611 3612 id = btf__find_by_name(btf, type); 3613 if (id < 0) 3614 return -1; 3615 3616 arg_fmt->type = btf__type_by_id(btf, id); 3617 arg_fmt->type_id = id; 3618 3619 return 0; 3620 } 3621 3622 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name) 3623 { 3624 struct bpf_program *pos, *prog = NULL; 3625 const char *sec_name; 3626 3627 if (trace->skel->obj == NULL) 3628 return NULL; 3629 3630 bpf_object__for_each_program(pos, trace->skel->obj) { 3631 sec_name = bpf_program__section_name(pos); 3632 if (sec_name && !strcmp(sec_name, name)) { 3633 prog = pos; 3634 break; 3635 } 3636 } 3637 3638 return prog; 3639 } 3640 3641 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc, 3642 const char *prog_name, const char *type) 3643 { 3644 struct bpf_program *prog; 3645 3646 if (prog_name == NULL) { 3647 char default_prog_name[256]; 3648 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name); 3649 prog = trace__find_bpf_program_by_title(trace, 
default_prog_name); 3650 if (prog != NULL) 3651 goto out_found; 3652 if (sc->fmt && sc->fmt->alias) { 3653 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias); 3654 prog = trace__find_bpf_program_by_title(trace, default_prog_name); 3655 if (prog != NULL) 3656 goto out_found; 3657 } 3658 goto out_unaugmented; 3659 } 3660 3661 prog = trace__find_bpf_program_by_title(trace, prog_name); 3662 3663 if (prog != NULL) { 3664 out_found: 3665 return prog; 3666 } 3667 3668 pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n", 3669 prog_name, type, sc->name); 3670 out_unaugmented: 3671 return trace->skel->progs.syscall_unaugmented; 3672 } 3673 3674 static void trace__init_syscall_bpf_progs(struct trace *trace, int id) 3675 { 3676 struct syscall *sc = trace__syscall_info(trace, NULL, id); 3677 3678 if (sc == NULL) 3679 return; 3680 3681 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter"); 3682 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit"); 3683 } 3684 3685 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id) 3686 { 3687 struct syscall *sc = trace__syscall_info(trace, NULL, id); 3688 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented); 3689 } 3690 3691 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id) 3692 { 3693 struct syscall *sc = trace__syscall_info(trace, NULL, id); 3694 return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented); 3695 } 3696 3697 static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int key, unsigned int *beauty_array) 3698 { 3699 struct tep_format_field *field; 3700 struct syscall *sc = trace__syscall_info(trace, NULL, key); 3701 const struct btf_type *bt; 3702 char *struct_offset, *tmp, name[32]; 3703 bool can_augment = false; 3704 int i, cnt; 3705 3706 if (sc == NULL) 3707 return -1; 3708 3709 trace__load_vmlinux_btf(trace); 3710 if (trace->btf == NULL) 3711 return -1; 3712 3713 for (i = 0, field = sc->args; field; ++i, field = field->next) { 3714 // XXX We're only collecting pointer payloads _from_ user space 3715 if (!sc->arg_fmt[i].from_user) 3716 continue; 3717 3718 struct_offset = strstr(field->type, "struct "); 3719 if (struct_offset == NULL) 3720 struct_offset = strstr(field->type, "union "); 3721 else 3722 struct_offset++; // "union" is shorter 3723 3724 if (field->flags & TEP_FIELD_IS_POINTER && struct_offset) { /* struct or union (think BPF's attr arg) */ 3725 struct_offset += 6; 3726 3727 /* for 'struct foo *', we only want 'foo' */ 3728 for (tmp = struct_offset, cnt = 0; *tmp != ' ' && *tmp != '\0'; ++tmp, ++cnt) { 3729 } 3730 3731 strncpy(name, struct_offset, cnt); 3732 name[cnt] = '\0'; 3733 3734 /* cache struct's btf_type and type_id */ 3735 if (syscall_arg_fmt__cache_btf_struct(&sc->arg_fmt[i], trace->btf, name)) 3736 continue; 3737 3738 bt = sc->arg_fmt[i].type; 3739 beauty_array[i] = bt->size; 3740 can_augment = true; 3741 } else if (field->flags & TEP_FIELD_IS_POINTER && /* string */ 3742 strcmp(field->type, "const char *") == 0 && 3743 (strstr(field->name, "name") || 3744 strstr(field->name, "path") || 3745 strstr(field->name, "file") || 3746 strstr(field->name, "root") || 3747 strstr(field->name, "key") || 3748 strstr(field->name, "special") || 3749 
strstr(field->name, "type") || 3750 strstr(field->name, "description"))) { 3751 beauty_array[i] = 1; 3752 can_augment = true; 3753 } else if (field->flags & TEP_FIELD_IS_POINTER && /* buffer */ 3754 strstr(field->type, "char *") && 3755 (strstr(field->name, "buf") || 3756 strstr(field->name, "val") || 3757 strstr(field->name, "msg"))) { 3758 int j; 3759 struct tep_format_field *field_tmp; 3760 3761 /* find the size of the buffer that appears in pairs with buf */ 3762 for (j = 0, field_tmp = sc->args; field_tmp; ++j, field_tmp = field_tmp->next) { 3763 if (!(field_tmp->flags & TEP_FIELD_IS_POINTER) && /* only integers */ 3764 (strstr(field_tmp->name, "count") || 3765 strstr(field_tmp->name, "siz") || /* size, bufsiz */ 3766 (strstr(field_tmp->name, "len") && strcmp(field_tmp->name, "filename")))) { 3767 /* filename's got 'len' in it, we don't want that */ 3768 beauty_array[i] = -(j + 1); 3769 can_augment = true; 3770 break; 3771 } 3772 } 3773 } 3774 } 3775 3776 if (can_augment) 3777 return 0; 3778 3779 return -1; 3780 } 3781 3782 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc) 3783 { 3784 struct tep_format_field *field, *candidate_field; 3785 /* 3786 * We're only interested in syscalls that have a pointer: 3787 */ 3788 for (field = sc->args; field; field = field->next) { 3789 if (field->flags & TEP_FIELD_IS_POINTER) 3790 goto try_to_find_pair; 3791 } 3792 3793 return NULL; 3794 3795 try_to_find_pair: 3796 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) { 3797 int id = syscalltbl__id_at_idx(trace->sctbl, i); 3798 struct syscall *pair = trace__syscall_info(trace, NULL, id); 3799 struct bpf_program *pair_prog; 3800 bool is_candidate = false; 3801 3802 if (pair == NULL || pair == sc || 3803 pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented) 3804 continue; 3805 3806 for (field = sc->args, candidate_field = pair->args; 3807 field && candidate_field; field = field->next, candidate_field = candidate_field->next) { 3808 bool is_pointer = field->flags & TEP_FIELD_IS_POINTER, 3809 candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER; 3810 3811 if (is_pointer) { 3812 if (!candidate_is_pointer) { 3813 // The candidate just doesn't copies our pointer arg, might copy other pointers we want. 3814 continue; 3815 } 3816 } else { 3817 if (candidate_is_pointer) { 3818 // The candidate might copy a pointer we don't have, skip it. 3819 goto next_candidate; 3820 } 3821 continue; 3822 } 3823 3824 if (strcmp(field->type, candidate_field->type)) 3825 goto next_candidate; 3826 3827 /* 3828 * This is limited in the BPF program but sys_write 3829 * uses "const char *" for its "buf" arg so we need to 3830 * use some heuristic that is kinda future proof... 3831 */ 3832 if (strcmp(field->type, "const char *") == 0 && 3833 !(strstr(field->name, "name") || 3834 strstr(field->name, "path") || 3835 strstr(field->name, "file") || 3836 strstr(field->name, "root") || 3837 strstr(field->name, "description"))) 3838 goto next_candidate; 3839 3840 is_candidate = true; 3841 } 3842 3843 if (!is_candidate) 3844 goto next_candidate; 3845 3846 /* 3847 * Check if the tentative pair syscall augmenter has more pointers, if it has, 3848 * then it may be collecting that and we then can't use it, as it would collect 3849 * more than what is common to the two syscalls. 
3850 */ 3851 if (candidate_field) { 3852 for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next) 3853 if (candidate_field->flags & TEP_FIELD_IS_POINTER) 3854 goto next_candidate; 3855 } 3856 3857 pair_prog = pair->bpf_prog.sys_enter; 3858 /* 3859 * If the pair isn't enabled, then its bpf_prog.sys_enter will not 3860 * have been searched for, so search it here and if it returns the 3861 * unaugmented one, then ignore it, otherwise we'll reuse that BPF 3862 * program for a filtered syscall on a non-filtered one. 3863 * 3864 * For instance, we have "!syscalls:sys_enter_renameat" and that is 3865 * useful for "renameat2". 3866 */ 3867 if (pair_prog == NULL) { 3868 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter"); 3869 if (pair_prog == trace->skel->progs.syscall_unaugmented) 3870 goto next_candidate; 3871 } 3872 3873 pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name); 3874 return pair_prog; 3875 next_candidate: 3876 continue; 3877 } 3878 3879 return NULL; 3880 } 3881 3882 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace) 3883 { 3884 int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter); 3885 int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit); 3886 int beauty_map_fd = bpf_map__fd(trace->skel->maps.beauty_map_enter); 3887 int err = 0; 3888 unsigned int beauty_array[6]; 3889 3890 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) { 3891 int prog_fd, key = syscalltbl__id_at_idx(trace->sctbl, i); 3892 3893 if (!trace__syscall_enabled(trace, key)) 3894 continue; 3895 3896 trace__init_syscall_bpf_progs(trace, key); 3897 3898 // It'll get at least the "!raw_syscalls:unaugmented" 3899 prog_fd = trace__bpf_prog_sys_enter_fd(trace, key); 3900 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY); 3901 if (err) 3902 break; 3903 prog_fd = trace__bpf_prog_sys_exit_fd(trace, key); 3904 err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY); 3905 if (err) 3906 break; 3907 3908 /* use beauty_map to tell BPF how many bytes to collect, set beauty_map's value here */ 3909 memset(beauty_array, 0, sizeof(beauty_array)); 3910 err = trace__bpf_sys_enter_beauty_map(trace, key, (unsigned int *)beauty_array); 3911 if (err) 3912 continue; 3913 err = bpf_map_update_elem(beauty_map_fd, &key, beauty_array, BPF_ANY); 3914 if (err) 3915 break; 3916 } 3917 3918 /* 3919 * Now let's do a second pass looking for enabled syscalls without 3920 * an augmenter that have a signature that is a superset of another 3921 * syscall with an augmenter so that we can auto-reuse it. 3922 * 3923 * I.e. if we have an augmenter for the "open" syscall that has 3924 * this signature: 3925 * 3926 * int open(const char *pathname, int flags, mode_t mode); 3927 * 3928 * i.e. one that will collect just the first string argument, then we 3929 * can reuse it for the 'creat' syscall, which has this signature: 3930 * 3931 * int creat(const char *pathname, mode_t mode); 3932 * 3933 * and for: 3934 * 3935 * int stat(const char *pathname, struct stat *statbuf); 3936 * int lstat(const char *pathname, struct stat *statbuf); 3937 * 3938 * Because the 'open' augmenter will collect the first arg as a string, 3939 * and leave alone all the other args, which already helps with 3940 * beautifying 'stat' and 'lstat's pathname arg.
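 *
 * I.e. the reuse is valid whenever the pointer args of the two
 * signatures line up, as only pointer args have their types
 * compared; non-pointer args, like 'open's flags vs 'creat's
 * mode, don't have to match.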
3941 * 3942 * Then, in time, when 'stat' gets an augmenter that collects both 3943 * first and second arg (this one on the raw_syscalls:sys_exit prog 3944 * array tail call), then that one will be used. 3945 */ 3946 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) { 3947 int key = syscalltbl__id_at_idx(trace->sctbl, i); 3948 struct syscall *sc = trace__syscall_info(trace, NULL, key); 3949 struct bpf_program *pair_prog; 3950 int prog_fd; 3951 3952 if (sc == NULL || sc->bpf_prog.sys_enter == NULL) 3953 continue; 3954 3955 /* 3956 * For now we're just reusing the sys_enter prog, and if it 3957 * already has an augmenter, we don't need to find one. 3958 */ 3959 if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented) 3960 continue; 3961 3962 /* 3963 * Look at all the other syscalls for one that has a signature 3964 * that is close enough that we can share: 3965 */ 3966 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc); 3967 if (pair_prog == NULL) 3968 continue; 3969 3970 sc->bpf_prog.sys_enter = pair_prog; 3971 3972 /* 3973 * Update the BPF_MAP_TYPE_PROG_ARRAY for raw_syscalls:sys_enter 3974 * with the fd for the program we're reusing: 3975 */ 3976 prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter); 3977 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY); 3978 if (err) 3979 break; 3980 } 3981 3982 return err; 3983 } 3984 #endif // HAVE_BPF_SKEL 3985 3986 static int trace__set_ev_qualifier_filter(struct trace *trace) 3987 { 3988 if (trace->syscalls.events.sys_enter) 3989 return trace__set_ev_qualifier_tp_filter(trace); 3990 return 0; 3991 } 3992 3993 static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused, 3994 size_t npids __maybe_unused, pid_t *pids __maybe_unused) 3995 { 3996 int err = 0; 3997 #ifdef HAVE_LIBBPF_SUPPORT 3998 bool value = true; 3999 int map_fd = bpf_map__fd(map); 4000 size_t i; 4001 4002 for (i = 0; i < npids; ++i) { 4003 err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY); 4004 if (err) 4005 break; 4006 } 4007 #endif 4008 return err; 4009 } 4010 4011 static int trace__set_filter_loop_pids(struct trace *trace) 4012 { 4013 unsigned int nr = 1, err; 4014 pid_t pids[32] = { 4015 getpid(), 4016 }; 4017 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]); 4018 4019 while (thread && nr < ARRAY_SIZE(pids)) { 4020 struct thread *parent = machine__find_thread(trace->host, 4021 thread__ppid(thread), 4022 thread__ppid(thread)); 4023 4024 if (parent == NULL) 4025 break; 4026 4027 if (!strcmp(thread__comm_str(parent), "sshd") || 4028 strstarts(thread__comm_str(parent), "gnome-terminal")) { 4029 pids[nr++] = thread__tid(parent); 4030 break; 4031 } 4032 thread = parent; 4033 } 4034 4035 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids); 4036 if (!err && trace->filter_pids.map) 4037 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids); 4038 4039 return err; 4040 } 4041 4042 static int trace__set_filter_pids(struct trace *trace) 4043 { 4044 int err = 0; 4045 /* 4046 * Better not use !target__has_task() here because we need to cover the 4047 * case where no threads were specified in the command line, but a 4048 * workload was, and in that case we will fill in the thread_map when 4049 * we fork the workload in evlist__prepare_workload.
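 *
 * Filtering the tracer's own pid (and, in trace__set_filter_loop_pids()
 * above, its sshd/terminal ancestry) avoids the feedback loop of
 * tracing the very syscalls perf does to write out the trace.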
4050 */ 4051 if (trace->filter_pids.nr > 0) { 4052 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr, 4053 trace->filter_pids.entries); 4054 if (!err && trace->filter_pids.map) { 4055 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr, 4056 trace->filter_pids.entries); 4057 } 4058 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) { 4059 err = trace__set_filter_loop_pids(trace); 4060 } 4061 4062 return err; 4063 } 4064 4065 static int __trace__deliver_event(struct trace *trace, union perf_event *event) 4066 { 4067 struct evlist *evlist = trace->evlist; 4068 struct perf_sample sample; 4069 int err = evlist__parse_sample(evlist, event, &sample); 4070 4071 if (err) 4072 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err); 4073 else 4074 trace__handle_event(trace, event, &sample); 4075 4076 return 0; 4077 } 4078 4079 static int __trace__flush_events(struct trace *trace) 4080 { 4081 u64 first = ordered_events__first_time(&trace->oe.data); 4082 u64 flush = trace->oe.last - NSEC_PER_SEC; 4083 4084 /* Is there something to flush? */ 4085 if (first && first < flush) 4086 return ordered_events__flush_time(&trace->oe.data, flush); 4087 4088 return 0; 4089 } 4090 4091 static int trace__flush_events(struct trace *trace) 4092 { 4093 return !trace->sort_events ? 0 : __trace__flush_events(trace); 4094 } 4095 4096 static int trace__deliver_event(struct trace *trace, union perf_event *event) 4097 { 4098 int err; 4099 4100 if (!trace->sort_events) 4101 return __trace__deliver_event(trace, event); 4102 4103 err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last); 4104 if (err && err != -1) 4105 return err; 4106 4107 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL); 4108 if (err) 4109 return err; 4110 4111 return trace__flush_events(trace); 4112 } 4113 4114 static int ordered_events__deliver_event(struct ordered_events *oe, 4115 struct ordered_event *event) 4116 { 4117 struct trace *trace = container_of(oe, struct trace, oe.data); 4118 4119 return __trace__deliver_event(trace, event->event); 4120 } 4121 4122 static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg, 4123 char **type) 4124 { 4125 struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel); 4126 const struct tep_event *tp_format; 4127 4128 if (!fmt) 4129 return NULL; 4130 4131 tp_format = evsel__tp_format(evsel); 4132 if (!tp_format) 4133 return NULL; 4134 4135 for (const struct tep_format_field *field = tp_format->format.fields; field; 4136 field = field->next, ++fmt) { 4137 if (strcmp(field->name, arg) == 0) { 4138 *type = field->type; 4139 return fmt; 4140 } 4141 } 4142 4143 return NULL; 4144 } 4145 4146 static int trace__expand_filter(struct trace *trace, struct evsel *evsel) 4147 { 4148 char *tok, *left = evsel->filter, *new_filter = evsel->filter; 4149 4150 while ((tok = strpbrk(left, "=<>!")) != NULL) { 4151 char *right = tok + 1, *right_end; 4152 4153 if (*right == '=') 4154 ++right; 4155 4156 while (isspace(*right)) 4157 ++right; 4158 4159 if (*right == '\0') 4160 break; 4161 4162 while (!isalpha(*left)) 4163 if (++left == tok) { 4164 /* 4165 * Bail out, can't find the name of the argument that is being 4166 * used in the filter; let it try to set this filter, it will fail later.
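 *
 * E.g. in a filter like "flags==O_RDWR" we need to isolate "flags"
 * to the left of the operator so that the symbolic value on the
 * right can be resolved to a number via the arg's strtoul method.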
4167 */ 4168 return 0; 4169 } 4170 4171 right_end = right + 1; 4172 while (isalnum(*right_end) || *right_end == '_' || *right_end == '|') 4173 ++right_end; 4174 4175 if (isalpha(*right)) { 4176 struct syscall_arg_fmt *fmt; 4177 int left_size = tok - left, 4178 right_size = right_end - right; 4179 char arg[128], *type; 4180 4181 while (isspace(left[left_size - 1])) 4182 --left_size; 4183 4184 scnprintf(arg, sizeof(arg), "%.*s", left_size, left); 4185 4186 fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg, &type); 4187 if (fmt == NULL) { 4188 pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n", 4189 arg, evsel->name, evsel->filter); 4190 return -1; 4191 } 4192 4193 pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ", 4194 arg, (int)(right - tok), tok, right_size, right); 4195 4196 if (fmt->strtoul) { 4197 u64 val; 4198 struct syscall_arg syscall_arg = { 4199 .trace = trace, 4200 .fmt = fmt, 4201 .type_name = type, 4202 .parm = fmt->parm, 4203 }; 4204 4205 if (fmt->strtoul(right, right_size, &syscall_arg, &val)) { 4206 char *n, expansion[19]; 4207 int expansion_length = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val); 4208 int expansion_offset = right - new_filter; 4209 4210 pr_debug("%s", expansion); 4211 4212 if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) { 4213 pr_debug(" out of memory!\n"); 4214 free(new_filter); 4215 return -1; 4216 } 4217 if (new_filter != evsel->filter) 4218 free(new_filter); 4219 left = n + expansion_offset + expansion_length; 4220 new_filter = n; 4221 } else { 4222 pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n", 4223 right_size, right, arg, evsel->name, evsel->filter); 4224 return -1; 4225 } 4226 } else { 4227 pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n", 4228 arg, evsel->name, evsel->filter); 4229 return -1; 4230 } 4231 4232 pr_debug("\n"); 4233 } else { 4234 left = right_end; 4235 } 4236 } 4237 4238 if (new_filter != evsel->filter) { 4239 pr_debug("New filter for %s: %s\n", evsel->name, new_filter); 4240 evsel__set_filter(evsel, new_filter); 4241 free(new_filter); 4242 } 4243 4244 return 0; 4245 } 4246 4247 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel) 4248 { 4249 struct evlist *evlist = trace->evlist; 4250 struct evsel *evsel; 4251 4252 evlist__for_each_entry(evlist, evsel) { 4253 if (evsel->filter == NULL) 4254 continue; 4255 4256 if (trace__expand_filter(trace, evsel)) { 4257 *err_evsel = evsel; 4258 return -1; 4259 } 4260 } 4261 4262 return 0; 4263 } 4264 4265 static int trace__run(struct trace *trace, int argc, const char **argv) 4266 { 4267 struct evlist *evlist = trace->evlist; 4268 struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL; 4269 int err = -1, i; 4270 unsigned long before; 4271 const bool forks = argc > 0; 4272 bool draining = false; 4273 4274 trace->live = true; 4275 4276 if (!trace->raw_augmented_syscalls) { 4277 if (trace->trace_syscalls && trace__add_syscall_newtp(trace)) 4278 goto out_error_raw_syscalls; 4279 4280 if (trace->trace_syscalls) 4281 trace->vfs_getname = evlist__add_vfs_getname(evlist); 4282 } 4283 4284 if ((trace->trace_pgfaults & TRACE_PFMAJ)) { 4285 pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ); 4286 if (pgfault_maj == NULL) 4287 goto out_error_mem; 4288 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param); 4289 evlist__add(evlist, pgfault_maj); 4290 } 4291 4292 if ((trace->trace_pgfaults & TRACE_PFMIN)) { 4293 pgfault_min
= evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN); 4294 if (pgfault_min == NULL) 4295 goto out_error_mem; 4296 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param); 4297 evlist__add(evlist, pgfault_min); 4298 } 4299 4300 /* Enable ignoring missing threads when -u/-p option is defined. */ 4301 trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid; 4302 4303 if (trace->sched && 4304 evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime)) 4305 goto out_error_sched_stat_runtime; 4306 /* 4307 * If a global cgroup was set, apply it to all the events without an 4308 * explicit cgroup. I.e.: 4309 * 4310 * trace -G A -e sched:*switch 4311 * 4312 * will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc 4313 * _and_ sched:sched_switch to the 'A' cgroup, while: 4314 * 4315 * trace -e sched:*switch -G A 4316 * 4317 * will only set the sched:sched_switch event to the 'A' cgroup, all the 4318 * other events (raw_syscalls:sys_{enter,exit}, etc) are left "without" 4319 * a cgroup (on the root cgroup, sys wide, etc). 4320 * 4321 * Multiple cgroups: 4322 * 4323 * trace -G A -e sched:*switch -G B 4324 * 4325 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes 4326 * to the 'B' cgroup. 4327 * 4328 * evlist__set_default_cgroup() grabs a reference of the passed cgroup 4329 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL. 4330 */ 4331 if (trace->cgroup) 4332 evlist__set_default_cgroup(trace->evlist, trace->cgroup); 4333 4334 err = evlist__create_maps(evlist, &trace->opts.target); 4335 if (err < 0) { 4336 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n"); 4337 goto out_delete_evlist; 4338 } 4339 4340 err = trace__symbols_init(trace, evlist); 4341 if (err < 0) { 4342 fprintf(trace->output, "Problems initializing symbol libraries!\n"); 4343 goto out_delete_evlist; 4344 } 4345 4346 if (trace->summary_mode == SUMMARY__BY_TOTAL) { 4347 trace->syscall_stats = alloc_syscall_stats(); 4348 if (trace->syscall_stats == NULL) 4349 goto out_delete_evlist; 4350 } 4351 4352 evlist__config(evlist, &trace->opts, &callchain_param); 4353 4354 if (forks) { 4355 err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL); 4356 if (err < 0) { 4357 fprintf(trace->output, "Couldn't run the workload!\n"); 4358 goto out_delete_evlist; 4359 } 4360 workload_pid = evlist->workload.pid; 4361 } 4362 4363 err = evlist__open(evlist); 4364 if (err < 0) 4365 goto out_error_open; 4366 #ifdef HAVE_BPF_SKEL 4367 if (trace->syscalls.events.bpf_output) { 4368 struct perf_cpu cpu; 4369 4370 /* 4371 * Set up the __augmented_syscalls__ BPF map to hold for each 4372 * CPU the bpf-output event's file descriptor.
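 *
 * The augmenter then emits each payload thru this map, so it
 * lands in the ring buffer of the CPU the syscall happened on.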
4373 */ 4374 perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) { 4375 bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__, 4376 &cpu.cpu, sizeof(int), 4377 xyarray__entry(trace->syscalls.events.bpf_output->core.fd, 4378 cpu.cpu, 0), 4379 sizeof(__u32), BPF_ANY); 4380 } 4381 } 4382 4383 if (trace->skel) 4384 trace->filter_pids.map = trace->skel->maps.pids_filtered; 4385 #endif 4386 err = trace__set_filter_pids(trace); 4387 if (err < 0) 4388 goto out_error_mem; 4389 4390 #ifdef HAVE_BPF_SKEL 4391 if (trace->skel && trace->skel->progs.sys_enter) 4392 trace__init_syscalls_bpf_prog_array_maps(trace); 4393 #endif 4394 4395 if (trace->ev_qualifier_ids.nr > 0) { 4396 err = trace__set_ev_qualifier_filter(trace); 4397 if (err < 0) 4398 goto out_errno; 4399 4400 if (trace->syscalls.events.sys_exit) { 4401 pr_debug("event qualifier tracepoint filter: %s\n", 4402 trace->syscalls.events.sys_exit->filter); 4403 } 4404 } 4405 4406 /* 4407 * If the "close" syscall is not traced, then we will not have the 4408 * opportunity to, in syscall_arg__scnprintf_close_fd(), invalidate the 4409 * fd->pathname table, and we'd end up showing the last value set by 4410 * syscalls opening a pathname and associating it with a descriptor or 4411 * reading it from /proc/pid/fd/ in cases where that doesn't make 4412 * sense. 4413 * 4414 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is 4415 * not in use. 4416 */ 4417 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close")); 4418 4419 err = trace__expand_filters(trace, &evsel); 4420 if (err) 4421 goto out_delete_evlist; 4422 err = evlist__apply_filters(evlist, &evsel, &trace->opts.target); 4423 if (err < 0) 4424 goto out_error_apply_filters; 4425 4426 err = evlist__mmap(evlist, trace->opts.mmap_pages); 4427 if (err < 0) 4428 goto out_error_mmap; 4429 4430 if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay) 4431 evlist__enable(evlist); 4432 4433 if (forks) 4434 evlist__start_workload(evlist); 4435 4436 if (trace->opts.target.initial_delay) { 4437 usleep(trace->opts.target.initial_delay * 1000); 4438 evlist__enable(evlist); 4439 } 4440 4441 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 || 4442 perf_thread_map__nr(evlist->core.threads) > 1 || 4443 evlist__first(evlist)->core.attr.inherit; 4444 4445 /* 4446 * Now that we already used evsel->core.attr to ask the kernel to setup the 4447 * events, let's reuse evsel->core.attr.sample_max_stack as the limit in 4448 * trace__resolve_callchain(), allowing per-event max-stack settings 4449 * to override an explicitly set --max-stack global setting.
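 *
 * Only evsels still at sample_max_stack == 0 get the global value
 * below, so a per-event "max-stack=N" term is preserved.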
4450 */ 4451 evlist__for_each_entry(evlist, evsel) { 4452 if (evsel__has_callchain(evsel) && 4453 evsel->core.attr.sample_max_stack == 0) 4454 evsel->core.attr.sample_max_stack = trace->max_stack; 4455 } 4456 again: 4457 before = trace->nr_events; 4458 4459 for (i = 0; i < evlist->core.nr_mmaps; i++) { 4460 union perf_event *event; 4461 struct mmap *md; 4462 4463 md = &evlist->mmap[i]; 4464 if (perf_mmap__read_init(&md->core) < 0) 4465 continue; 4466 4467 while ((event = perf_mmap__read_event(&md->core)) != NULL) { 4468 ++trace->nr_events; 4469 4470 err = trace__deliver_event(trace, event); 4471 if (err) 4472 goto out_disable; 4473 4474 perf_mmap__consume(&md->core); 4475 4476 if (interrupted) 4477 goto out_disable; 4478 4479 if (done && !draining) { 4480 evlist__disable(evlist); 4481 draining = true; 4482 } 4483 } 4484 perf_mmap__read_done(&md->core); 4485 } 4486 4487 if (trace->nr_events == before) { 4488 int timeout = done ? 100 : -1; 4489 4490 if (!draining && evlist__poll(evlist, timeout) > 0) { 4491 if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0) 4492 draining = true; 4493 4494 goto again; 4495 } else { 4496 if (trace__flush_events(trace)) 4497 goto out_disable; 4498 } 4499 } else { 4500 goto again; 4501 } 4502 4503 out_disable: 4504 thread__zput(trace->current); 4505 4506 evlist__disable(evlist); 4507 4508 if (trace->sort_events) 4509 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL); 4510 4511 if (!err) { 4512 if (trace->summary) { 4513 if (trace->summary_mode == SUMMARY__BY_TOTAL) 4514 trace__fprintf_total_summary(trace, trace->output); 4515 else 4516 trace__fprintf_thread_summary(trace, trace->output); 4517 } 4518 4519 if (trace->show_tool_stats) { 4520 fprintf(trace->output, "Stats:\n " 4521 " vfs_getname : %" PRIu64 "\n" 4522 " proc_getname: %" PRIu64 "\n", 4523 trace->stats.vfs_getname, 4524 trace->stats.proc_getname); 4525 } 4526 } 4527 4528 out_delete_evlist: 4529 delete_syscall_stats(trace->syscall_stats); 4530 trace__symbols__exit(trace); 4531 evlist__free_syscall_tp_fields(evlist); 4532 evlist__delete(evlist); 4533 cgroup__put(trace->cgroup); 4534 trace->evlist = NULL; 4535 trace->live = false; 4536 return err; 4537 { 4538 char errbuf[BUFSIZ]; 4539 4540 out_error_sched_stat_runtime: 4541 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime"); 4542 goto out_error; 4543 4544 out_error_raw_syscalls: 4545 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)"); 4546 goto out_error; 4547 4548 out_error_mmap: 4549 evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf)); 4550 goto out_error; 4551 4552 out_error_open: 4553 evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf)); 4554 4555 out_error: 4556 fprintf(trace->output, "%s\n", errbuf); 4557 goto out_delete_evlist; 4558 4559 out_error_apply_filters: 4560 fprintf(trace->output, 4561 "Failed to set filter \"%s\" on event %s with %d (%s)\n", 4562 evsel->filter, evsel__name(evsel), errno, 4563 str_error_r(errno, errbuf, sizeof(errbuf))); 4564 goto out_delete_evlist; 4565 } 4566 out_error_mem: 4567 fprintf(trace->output, "Not enough memory to run!\n"); 4568 goto out_delete_evlist; 4569 4570 out_errno: 4571 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno)); 4572 goto out_delete_evlist; 4573 } 4574 4575 static int trace__replay(struct trace *trace) 4576 { 4577 const struct evsel_str_handler handlers[] = { 4578 { "probe:vfs_getname", trace__vfs_getname, }, 4579 }; 4580 struct perf_data data = { 4581 
.path = input_name, 4582 .mode = PERF_DATA_MODE_READ, 4583 .force = trace->force, 4584 }; 4585 struct perf_session *session; 4586 struct evsel *evsel; 4587 int err = -1; 4588 4589 trace->tool.sample = trace__process_sample; 4590 trace->tool.mmap = perf_event__process_mmap; 4591 trace->tool.mmap2 = perf_event__process_mmap2; 4592 trace->tool.comm = perf_event__process_comm; 4593 trace->tool.exit = perf_event__process_exit; 4594 trace->tool.fork = perf_event__process_fork; 4595 trace->tool.attr = perf_event__process_attr; 4596 trace->tool.tracing_data = perf_event__process_tracing_data; 4597 trace->tool.build_id = perf_event__process_build_id; 4598 trace->tool.namespaces = perf_event__process_namespaces; 4599 4600 trace->tool.ordered_events = true; 4601 trace->tool.ordering_requires_timestamps = true; 4602 4603 /* add tid to output */ 4604 trace->multiple_threads = true; 4605 4606 session = perf_session__new(&data, &trace->tool); 4607 if (IS_ERR(session)) 4608 return PTR_ERR(session); 4609 4610 if (trace->opts.target.pid) 4611 symbol_conf.pid_list_str = strdup(trace->opts.target.pid); 4612 4613 if (trace->opts.target.tid) 4614 symbol_conf.tid_list_str = strdup(trace->opts.target.tid); 4615 4616 if (symbol__init(&session->header.env) < 0) 4617 goto out; 4618 4619 trace->host = &session->machines.host; 4620 4621 err = perf_session__set_tracepoints_handlers(session, handlers); 4622 if (err) 4623 goto out; 4624 4625 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter"); 4626 trace->syscalls.events.sys_enter = evsel; 4627 /* older kernels have syscalls tp versus raw_syscalls */ 4628 if (evsel == NULL) 4629 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter"); 4630 4631 if (evsel && 4632 (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 || 4633 perf_evsel__init_sc_tp_ptr_field(evsel, args))) { 4634 pr_err("Error initializing the raw_syscalls:sys_enter event\n"); 4635 goto out; 4636 } 4637 4638 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit"); 4639 trace->syscalls.events.sys_exit = evsel; 4640 if (evsel == NULL) 4641 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit"); 4642 if (evsel && 4643 (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 || 4644 perf_evsel__init_sc_tp_uint_field(evsel, ret))) { 4645 pr_err("Error initializing the raw_syscalls:sys_exit event\n"); 4646 goto out; 4647 } 4648 4649 evlist__for_each_entry(session->evlist, evsel) { 4650 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE && 4651 (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ || 4652 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN || 4653 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS)) 4654 evsel->handler = trace__pgfault; 4655 } 4656 4657 if (trace->summary_mode == SUMMARY__BY_TOTAL) { 4658 trace->syscall_stats = alloc_syscall_stats(); 4659 if (trace->syscall_stats == NULL) 4660 goto out; 4661 } 4662 4663 setup_pager(); 4664 4665 err = perf_session__process_events(session); 4666 if (err) 4667 pr_err("Failed to process events, error %d\n", err); 4668 4669 else if (trace->summary) 4670 trace__fprintf_thread_summary(trace, trace->output); 4671 4672 out: 4673 delete_syscall_stats(trace->syscall_stats); 4674 perf_session__delete(session); 4675 4676 return err; 4677 } 4678 4679 static size_t trace__fprintf_summary_header(FILE *fp) 4680 { 4681 size_t printed; 4682 4683 printed = fprintf(fp, "\n Summary of events:\n\n"); 4684 4685 return printed; 4686 } 4687 4688 struct
syscall_entry { 4689 struct syscall_stats *stats; 4690 double msecs; 4691 int syscall; 4692 }; 4693 4694 static int entry_cmp(const void *e1, const void *e2) 4695 { 4696 const struct syscall_entry *entry1 = e1; 4697 const struct syscall_entry *entry2 = e2; 4698 4699 return entry1->msecs > entry2->msecs ? -1 : 1; 4700 } 4701 4702 static struct syscall_entry *syscall__sort_stats(struct hashmap *syscall_stats) 4703 { 4704 struct syscall_entry *entry; 4705 struct hashmap_entry *pos; 4706 unsigned bkt, i, nr; 4707 4708 nr = syscall_stats->sz; 4709 entry = malloc(nr * sizeof(*entry)); 4710 if (entry == NULL) 4711 return NULL; 4712 4713 i = 0; 4714 hashmap__for_each_entry(syscall_stats, pos, bkt) { 4715 struct syscall_stats *ss = pos->pvalue; 4716 struct stats *st = &ss->stats; 4717 4718 entry[i].stats = ss; 4719 entry[i].msecs = (u64)st->n * (avg_stats(st) / NSEC_PER_MSEC); 4720 entry[i].syscall = pos->key; 4721 i++; 4722 } 4723 assert(i == nr); 4724 4725 qsort(entry, nr, sizeof(*entry), entry_cmp); 4726 return entry; 4727 } 4728 4729 static size_t syscall__dump_stats(struct trace *trace, FILE *fp, 4730 struct hashmap *syscall_stats) 4731 { 4732 size_t printed = 0; 4733 struct syscall *sc; 4734 struct syscall_entry *entries; 4735 4736 entries = syscall__sort_stats(syscall_stats); 4737 if (entries == NULL) 4738 return 0; 4739 4740 printed += fprintf(fp, "\n"); 4741 4742 printed += fprintf(fp, " syscall calls errors total min avg max stddev\n"); 4743 printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n"); 4744 printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n"); 4745 4746 for (size_t i = 0; i < syscall_stats->sz; i++) { 4747 struct syscall_entry *entry = &entries[i]; 4748 struct syscall_stats *stats = entry->stats; 4749 4750 if (stats) { 4751 double min = (double)(stats->stats.min) / NSEC_PER_MSEC; 4752 double max = (double)(stats->stats.max) / NSEC_PER_MSEC; 4753 double avg = avg_stats(&stats->stats); 4754 double pct; 4755 u64 n = (u64)stats->stats.n; 4756 4757 pct = avg ? 
100.0 * stddev_stats(&stats->stats) / avg : 0.0; 4758 avg /= NSEC_PER_MSEC; 4759 4760 sc = &trace->syscalls.table[entry->syscall]; 4761 printed += fprintf(fp, " %-15s", sc->name); 4762 printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f", 4763 n, stats->nr_failures, entry->msecs, min, avg); 4764 printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct); 4765 4766 if (trace->errno_summary && stats->nr_failures) { 4767 int e; 4768 4769 for (e = 0; e < stats->max_errno; ++e) { 4770 if (stats->errnos[e] != 0) 4771 fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]); 4772 } 4773 } 4774 } 4775 } 4776 4777 free(entries); 4778 printed += fprintf(fp, "\n\n"); 4779 4780 return printed; 4781 } 4782 4783 static size_t thread__dump_stats(struct thread_trace *ttrace, 4784 struct trace *trace, FILE *fp) 4785 { 4786 return syscall__dump_stats(trace, fp, ttrace->syscall_stats); 4787 } 4788 4789 static size_t system__dump_stats(struct trace *trace, FILE *fp) 4790 { 4791 return syscall__dump_stats(trace, fp, trace->syscall_stats); 4792 } 4793 4794 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace) 4795 { 4796 size_t printed = 0; 4797 struct thread_trace *ttrace = thread__priv(thread); 4798 double ratio; 4799 4800 if (ttrace == NULL) 4801 return 0; 4802 4803 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0; 4804 4805 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread__tid(thread)); 4806 printed += fprintf(fp, "%lu events, ", ttrace->nr_events); 4807 printed += fprintf(fp, "%.1f%%", ratio); 4808 if (ttrace->pfmaj) 4809 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj); 4810 if (ttrace->pfmin) 4811 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin); 4812 if (trace->sched) 4813 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms); 4814 else if (fputc('\n', fp) != EOF) 4815 ++printed; 4816 4817 printed += thread__dump_stats(ttrace, trace, fp); 4818 4819 return printed; 4820 } 4821 4822 static unsigned long thread__nr_events(struct thread_trace *ttrace) 4823 { 4824 return ttrace ? ttrace->nr_events : 0; 4825 } 4826 4827 static int trace_nr_events_cmp(void *priv __maybe_unused, 4828 const struct list_head *la, 4829 const struct list_head *lb) 4830 { 4831 struct thread_list *a = list_entry(la, struct thread_list, list); 4832 struct thread_list *b = list_entry(lb, struct thread_list, list); 4833 unsigned long a_nr_events = thread__nr_events(thread__priv(a->thread)); 4834 unsigned long b_nr_events = thread__nr_events(thread__priv(b->thread)); 4835 4836 if (a_nr_events != b_nr_events) 4837 return a_nr_events < b_nr_events ? -1 : 1; 4838 4839 /* Identical number of events, place smaller tids first. */ 4840 return thread__tid(a->thread) < thread__tid(b->thread) 4841 ? -1 4842 : (thread__tid(a->thread) > thread__tid(b->thread) ?
1 : 0); 4843 } 4844 4845 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp) 4846 { 4847 size_t printed = trace__fprintf_summary_header(fp); 4848 LIST_HEAD(threads); 4849 4850 if (machine__thread_list(trace->host, &threads) == 0) { 4851 struct thread_list *pos; 4852 4853 list_sort(NULL, &threads, trace_nr_events_cmp); 4854 4855 list_for_each_entry(pos, &threads, list) 4856 printed += trace__fprintf_thread(fp, pos->thread, trace); 4857 } 4858 thread_list__delete(&threads); 4859 return printed; 4860 } 4861 4862 static size_t trace__fprintf_total_summary(struct trace *trace, FILE *fp) 4863 { 4864 size_t printed = trace__fprintf_summary_header(fp); 4865 4866 printed += fprintf(fp, " total, "); 4867 printed += fprintf(fp, "%lu events", trace->nr_events); 4868 4869 if (trace->pfmaj) 4870 printed += fprintf(fp, ", %lu majfaults", trace->pfmaj); 4871 if (trace->pfmin) 4872 printed += fprintf(fp, ", %lu minfaults", trace->pfmin); 4873 if (trace->sched) 4874 printed += fprintf(fp, ", %.3f msec\n", trace->runtime_ms); 4875 else if (fputc('\n', fp) != EOF) 4876 ++printed; 4877 4878 printed += system__dump_stats(trace, fp); 4879 4880 return printed; 4881 } 4882 4883 static int trace__set_duration(const struct option *opt, const char *str, 4884 int unset __maybe_unused) 4885 { 4886 struct trace *trace = opt->value; 4887 4888 trace->duration_filter = atof(str); 4889 return 0; 4890 } 4891 4892 static int trace__set_filter_pids_from_option(const struct option *opt, const char *str, 4893 int unset __maybe_unused) 4894 { 4895 int ret = -1; 4896 size_t i; 4897 struct trace *trace = opt->value; 4898 /* 4899 * FIXME: introduce a intarray class, plain parse csv and create a 4900 * { int nr, int entries[] } struct... 4901 */ 4902 struct intlist *list = intlist__new(str); 4903 4904 if (list == NULL) 4905 return -1; 4906 4907 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1; 4908 trace->filter_pids.entries = calloc(i, sizeof(pid_t)); 4909 4910 if (trace->filter_pids.entries == NULL) 4911 goto out; 4912 4913 trace->filter_pids.entries[0] = getpid(); 4914 4915 for (i = 1; i < trace->filter_pids.nr; ++i) 4916 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i; 4917 4918 intlist__delete(list); 4919 ret = 0; 4920 out: 4921 return ret; 4922 } 4923 4924 static int trace__open_output(struct trace *trace, const char *filename) 4925 { 4926 struct stat st; 4927 4928 if (!stat(filename, &st) && st.st_size) { 4929 char oldname[PATH_MAX]; 4930 4931 scnprintf(oldname, sizeof(oldname), "%s.old", filename); 4932 unlink(oldname); 4933 rename(filename, oldname); 4934 } 4935 4936 trace->output = fopen(filename, "w"); 4937 4938 return trace->output == NULL ? 
-errno : 0; 4939 } 4940 4941 static int parse_pagefaults(const struct option *opt, const char *str, 4942 int unset __maybe_unused) 4943 { 4944 int *trace_pgfaults = opt->value; 4945 4946 if (strcmp(str, "all") == 0) 4947 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN; 4948 else if (strcmp(str, "maj") == 0) 4949 *trace_pgfaults |= TRACE_PFMAJ; 4950 else if (strcmp(str, "min") == 0) 4951 *trace_pgfaults |= TRACE_PFMIN; 4952 else 4953 return -1; 4954 4955 return 0; 4956 } 4957 4958 static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler) 4959 { 4960 struct evsel *evsel; 4961 4962 evlist__for_each_entry(evlist, evsel) { 4963 if (evsel->handler == NULL) 4964 evsel->handler = handler; 4965 } 4966 } 4967 4968 static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name) 4969 { 4970 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel); 4971 4972 if (fmt) { 4973 const struct syscall_fmt *scfmt = syscall_fmt__find(name); 4974 4975 if (scfmt) { 4976 const struct tep_event *tp_format = evsel__tp_format(evsel); 4977 4978 if (tp_format) { 4979 int skip = 0; 4980 4981 if (strcmp(tp_format->format.fields->name, "__syscall_nr") == 0 || 4982 strcmp(tp_format->format.fields->name, "nr") == 0) 4983 ++skip; 4984 4985 memcpy(fmt + skip, scfmt->arg, 4986 (tp_format->format.nr_fields - skip) * sizeof(*fmt)); 4987 } 4988 } 4989 } 4990 } 4991 4992 static int evlist__set_syscall_tp_fields(struct evlist *evlist, bool *use_btf) 4993 { 4994 struct evsel *evsel; 4995 4996 evlist__for_each_entry(evlist, evsel) { 4997 const struct tep_event *tp_format; 4998 4999 if (evsel->priv) 5000 continue; 5001 5002 tp_format = evsel__tp_format(evsel); 5003 if (!tp_format) 5004 continue; 5005 5006 if (strcmp(tp_format->system, "syscalls")) { 5007 evsel__init_tp_arg_scnprintf(evsel, use_btf); 5008 continue; 5009 } 5010 5011 if (evsel__init_syscall_tp(evsel)) 5012 return -1; 5013 5014 if (!strncmp(tp_format->name, "sys_enter_", 10)) { 5015 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 5016 5017 if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64))) 5018 return -1; 5019 5020 evsel__set_syscall_arg_fmt(evsel, 5021 tp_format->name + sizeof("sys_enter_") - 1); 5022 } else if (!strncmp(tp_format->name, "sys_exit_", 9)) { 5023 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 5024 5025 if (__tp_field__init_uint(&sc->ret, sizeof(u64), 5026 sc->id.offset + sizeof(u64), 5027 evsel->needs_swap)) 5028 return -1; 5029 5030 evsel__set_syscall_arg_fmt(evsel, 5031 tp_format->name + sizeof("sys_exit_") - 1); 5032 } 5033 } 5034 5035 return 0; 5036 } 5037 5038 /* 5039 * XXX: Hackish, just splitting the combined -e+--event (syscalls 5040 * (raw_syscalls:sys_{enter,exit}) + events (tracepoints, HW, SW, etc)) to use 5041 * existing facilities unchanged (trace->ev_qualifier + parse_options()). 5042 * 5043 * It'd be better to introduce a parse_options() variant that would return a 5044 * list with the terms it didn't match to an event...
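 *
 * E.g.:
 *
 *	perf trace -e open*,sched:sched_switch
 *
 * ends up with "open*" in the syscall qualifier list (lists[1] below)
 * and with "sched:sched_switch" handed to parse_events_option().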
5045 */ 5046 static int trace__parse_events_option(const struct option *opt, const char *str, 5047 int unset __maybe_unused) 5048 { 5049 struct trace *trace = (struct trace *)opt->value; 5050 const char *s = str; 5051 char *sep = NULL, *lists[2] = { NULL, NULL, }; 5052 int len = strlen(str) + 1, err = -1, list, idx; 5053 char *strace_groups_dir = system_path(STRACE_GROUPS_DIR); 5054 char group_name[PATH_MAX]; 5055 const struct syscall_fmt *fmt; 5056 5057 if (strace_groups_dir == NULL) 5058 return -1; 5059 5060 if (*s == '!') { 5061 ++s; 5062 trace->not_ev_qualifier = true; 5063 } 5064 5065 while (1) { 5066 if ((sep = strchr(s, ',')) != NULL) 5067 *sep = '\0'; 5068 5069 list = 0; 5070 if (syscalltbl__id(trace->sctbl, s) >= 0 || 5071 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) { 5072 list = 1; 5073 goto do_concat; 5074 } 5075 5076 fmt = syscall_fmt__find_by_alias(s); 5077 if (fmt != NULL) { 5078 list = 1; 5079 s = fmt->name; 5080 } else { 5081 path__join(group_name, sizeof(group_name), strace_groups_dir, s); 5082 if (access(group_name, R_OK) == 0) 5083 list = 1; 5084 } 5085 do_concat: 5086 if (lists[list]) { 5087 sprintf(lists[list] + strlen(lists[list]), ",%s", s); 5088 } else { 5089 lists[list] = malloc(len); 5090 if (lists[list] == NULL) 5091 goto out; 5092 strcpy(lists[list], s); 5093 } 5094 5095 if (!sep) 5096 break; 5097 5098 *sep = ','; 5099 s = sep + 1; 5100 } 5101 5102 if (lists[1] != NULL) { 5103 struct strlist_config slist_config = { 5104 .dirname = strace_groups_dir, 5105 }; 5106 5107 trace->ev_qualifier = strlist__new(lists[1], &slist_config); 5108 if (trace->ev_qualifier == NULL) { 5109 fputs("Not enough memory to parse event qualifier", trace->output); 5110 goto out; 5111 } 5112 5113 if (trace__validate_ev_qualifier(trace)) 5114 goto out; 5115 trace->trace_syscalls = true; 5116 } 5117 5118 err = 0; 5119 5120 if (lists[0]) { 5121 struct parse_events_option_args parse_events_option_args = { 5122 .evlistp = &trace->evlist, 5123 }; 5124 struct option o = { 5125 .value = &parse_events_option_args, 5126 }; 5127 err = parse_events_option(&o, lists[0], 0); 5128 } 5129 out: 5130 free(strace_groups_dir); 5131 free(lists[0]); 5132 free(lists[1]); 5133 if (sep) 5134 *sep = ','; 5135 5136 return err; 5137 } 5138 5139 static int trace__parse_cgroups(const struct option *opt, const char *str, int unset) 5140 { 5141 struct trace *trace = opt->value; 5142 5143 if (!list_empty(&trace->evlist->core.entries)) { 5144 struct option o = { 5145 .value = &trace->evlist, 5146 }; 5147 return parse_cgroups(&o, str, unset); 5148 } 5149 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str); 5150 5151 return 0; 5152 } 5153 5154 static int trace__parse_summary_mode(const struct option *opt, const char *str, 5155 int unset __maybe_unused) 5156 { 5157 struct trace *trace = opt->value; 5158 5159 if (!strcmp(str, "thread")) { 5160 trace->summary_mode = SUMMARY__BY_THREAD; 5161 } else if (!strcmp(str, "total")) { 5162 trace->summary_mode = SUMMARY__BY_TOTAL; 5163 } else { 5164 pr_err("Unknown summary mode: %s\n", str); 5165 return -1; 5166 } 5167 5168 return 0; 5169 } 5170 5171 static int trace__config(const char *var, const char *value, void *arg) 5172 { 5173 struct trace *trace = arg; 5174 int err = 0; 5175 5176 if (!strcmp(var, "trace.add_events")) { 5177 trace->perfconfig_events = strdup(value); 5178 if (trace->perfconfig_events == NULL) { 5179 pr_err("Not enough memory for %s\n", "trace.add_events"); 5180 return -1; 5181 } 5182 } else if (!strcmp(var, "trace.show_timestamp")) { 5183 
trace->show_tstamp = perf_config_bool(var, value); 5184 } else if (!strcmp(var, "trace.show_duration")) { 5185 trace->show_duration = perf_config_bool(var, value); 5186 } else if (!strcmp(var, "trace.show_arg_names")) { 5187 trace->show_arg_names = perf_config_bool(var, value); 5188 if (!trace->show_arg_names) 5189 trace->show_zeros = true; 5190 } else if (!strcmp(var, "trace.show_zeros")) { 5191 bool new_show_zeros = perf_config_bool(var, value); 5192 if (!trace->show_arg_names && !new_show_zeros) { 5193 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n"); 5194 goto out; 5195 } 5196 trace->show_zeros = new_show_zeros; 5197 } else if (!strcmp(var, "trace.show_prefix")) { 5198 trace->show_string_prefix = perf_config_bool(var, value); 5199 } else if (!strcmp(var, "trace.no_inherit")) { 5200 trace->opts.no_inherit = perf_config_bool(var, value); 5201 } else if (!strcmp(var, "trace.args_alignment")) { 5202 int args_alignment = 0; 5203 if (perf_config_int(&args_alignment, var, value) == 0) 5204 trace->args_alignment = args_alignment; 5205 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) { 5206 if (strcasecmp(value, "libtraceevent") == 0) 5207 trace->libtraceevent_print = true; 5208 else if (strcasecmp(value, "libbeauty") == 0) 5209 trace->libtraceevent_print = false; 5210 } 5211 out: 5212 return err; 5213 } 5214 5215 static void trace__exit(struct trace *trace) 5216 { 5217 int i; 5218 5219 strlist__delete(trace->ev_qualifier); 5220 zfree(&trace->ev_qualifier_ids.entries); 5221 if (trace->syscalls.table) { 5222 for (i = 0; i <= trace->sctbl->syscalls.max_id; i++) 5223 syscall__exit(&trace->syscalls.table[i]); 5224 zfree(&trace->syscalls.table); 5225 } 5226 syscalltbl__delete(trace->sctbl); 5227 zfree(&trace->perfconfig_events); 5228 } 5229 5230 #ifdef HAVE_BPF_SKEL 5231 static int bpf__setup_bpf_output(struct evlist *evlist) 5232 { 5233 int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/"); 5234 5235 if (err) 5236 pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n"); 5237 5238 return err; 5239 } 5240 #endif 5241 5242 int cmd_trace(int argc, const char **argv) 5243 { 5244 const char *trace_usage[] = { 5245 "perf trace [<options>] [<command>]", 5246 "perf trace [<options>] -- <command> [<options>]", 5247 "perf trace record [<options>] [<command>]", 5248 "perf trace record [<options>] -- <command> [<options>]", 5249 NULL 5250 }; 5251 struct trace trace = { 5252 .opts = { 5253 .target = { 5254 .uid = UINT_MAX, 5255 .uses_mmap = true, 5256 }, 5257 .user_freq = UINT_MAX, 5258 .user_interval = ULLONG_MAX, 5259 .no_buffering = true, 5260 .mmap_pages = UINT_MAX, 5261 }, 5262 .output = stderr, 5263 .show_comm = true, 5264 .show_tstamp = true, 5265 .show_duration = true, 5266 .show_arg_names = true, 5267 .args_alignment = 70, 5268 .trace_syscalls = false, 5269 .kernel_syscallchains = false, 5270 .max_stack = UINT_MAX, 5271 .max_events = ULONG_MAX, 5272 }; 5273 const char *output_name = NULL; 5274 const struct option trace_options[] = { 5275 OPT_CALLBACK('e', "event", &trace, "event", 5276 "event/syscall selector. 
use 'perf list' to list available events", 5277 trace__parse_events_option), 5278 OPT_CALLBACK(0, "filter", &trace.evlist, "filter", 5279 "event filter", parse_filter), 5280 OPT_BOOLEAN(0, "comm", &trace.show_comm, 5281 "show the thread COMM next to its id"), 5282 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"), 5283 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace", 5284 trace__parse_events_option), 5285 OPT_STRING('o', "output", &output_name, "file", "output file name"), 5286 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"), 5287 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid", 5288 "trace events on existing process id"), 5289 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid", 5290 "trace events on existing thread id"), 5291 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids", 5292 "pids to filter (by the kernel)", trace__set_filter_pids_from_option), 5293 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide, 5294 "system-wide collection from all CPUs"), 5295 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu", 5296 "list of cpus to monitor"), 5297 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit, 5298 "child tasks do not inherit counters"), 5299 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages", 5300 "number of mmap data pages", evlist__parse_mmap_pages), 5301 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user", 5302 "user to profile"), 5303 OPT_CALLBACK(0, "duration", &trace, "float", 5304 "show only events with duration > N.M ms", 5305 trace__set_duration), 5306 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"), 5307 OPT_INCR('v', "verbose", &verbose, "be more verbose"), 5308 OPT_BOOLEAN('T', "time", &trace.full_time, 5309 "Show full timestamp, not time relative to first start"), 5310 OPT_BOOLEAN(0, "failure", &trace.failure_only, 5311 "Show only syscalls that failed"), 5312 OPT_BOOLEAN('s', "summary", &trace.summary_only, 5313 "Show only syscall summary with statistics"), 5314 OPT_BOOLEAN('S', "with-summary", &trace.summary, 5315 "Show all syscalls and summary with statistics"), 5316 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary, 5317 "Show errno stats per syscall, use with -s or -S"), 5318 OPT_CALLBACK(0, "summary-mode", &trace, "mode", 5319 "How to show summary: select thread (default) or total", 5320 trace__parse_summary_mode), 5321 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min", 5322 "Trace pagefaults", parse_pagefaults, "maj"), 5323 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"), 5324 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"), 5325 OPT_CALLBACK(0, "call-graph", &trace.opts, 5326 "record_mode[,record_size]", record_callchain_help, 5327 &record_parse_callchain_opt), 5328 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print, 5329 "Use libtraceevent to print the tracepoint arguments."), 5330 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains, 5331 "Show the kernel callchains on the syscall exit path"), 5332 OPT_ULONG(0, "max-events", &trace.max_events, 5333 "Set the maximum number of events to print, exit after that is reached. 
"), 5334 OPT_UINTEGER(0, "min-stack", &trace.min_stack, 5335 "Set the minimum stack depth when parsing the callchain, " 5336 "anything below the specified depth will be ignored."), 5337 OPT_UINTEGER(0, "max-stack", &trace.max_stack, 5338 "Set the maximum stack depth when parsing the callchain, " 5339 "anything beyond the specified depth will be ignored. " 5340 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)), 5341 OPT_BOOLEAN(0, "sort-events", &trace.sort_events, 5342 "Sort batch of events before processing, use if getting out of order events"), 5343 OPT_BOOLEAN(0, "print-sample", &trace.print_sample, 5344 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"), 5345 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout, 5346 "per thread proc mmap processing timeout in ms"), 5347 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only", 5348 trace__parse_cgroups), 5349 OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay, 5350 "ms to wait before starting measurement after program " 5351 "start"), 5352 OPT_BOOLEAN(0, "force-btf", &trace.force_btf, "Prefer btf_dump general pretty printer" 5353 "to customized ones"), 5354 OPTS_EVSWITCH(&trace.evswitch), 5355 OPT_END() 5356 }; 5357 bool __maybe_unused max_stack_user_set = true; 5358 bool mmap_pages_user_set = true; 5359 struct evsel *evsel; 5360 const char * const trace_subcommands[] = { "record", NULL }; 5361 int err = -1; 5362 char bf[BUFSIZ]; 5363 struct sigaction sigchld_act; 5364 5365 signal(SIGSEGV, sighandler_dump_stack); 5366 signal(SIGFPE, sighandler_dump_stack); 5367 signal(SIGINT, sighandler_interrupt); 5368 5369 memset(&sigchld_act, 0, sizeof(sigchld_act)); 5370 sigchld_act.sa_flags = SA_SIGINFO; 5371 sigchld_act.sa_sigaction = sighandler_chld; 5372 sigaction(SIGCHLD, &sigchld_act, NULL); 5373 5374 trace.evlist = evlist__new(); 5375 trace.sctbl = syscalltbl__new(); 5376 5377 if (trace.evlist == NULL || trace.sctbl == NULL) { 5378 pr_err("Not enough memory to run!\n"); 5379 err = -ENOMEM; 5380 goto out; 5381 } 5382 5383 /* 5384 * Parsing .perfconfig may entail creating a BPF event, that may need 5385 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting 5386 * is too small. This affects just this process, not touching the 5387 * global setting. If it fails we'll get something in 'perf trace -v' 5388 * to help diagnose the problem. 5389 */ 5390 rlimit__bump_memlock(); 5391 5392 err = perf_config(trace__config, &trace); 5393 if (err) 5394 goto out; 5395 5396 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands, 5397 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION); 5398 5399 /* 5400 * Here we already passed thru trace__parse_events_option() and it has 5401 * already figured out if -e syscall_name, if not but if --event 5402 * foo:bar was used, the user is interested _just_ in those, say, 5403 * tracepoint events, not in the strace-like syscall-name-based mode. 5404 * 5405 * This is important because we need to check if strace-like mode is 5406 * needed to decided if we should filter out the eBPF 5407 * __augmented_syscalls__ code, if it is in the mix, say, via 5408 * .perfconfig trace.add_events, and filter those out. 5409 */ 5410 if (!trace.trace_syscalls && !trace.trace_pgfaults && 5411 trace.evlist->core.nr_entries == 0 /* Was --events used? 
*/) { 5412 trace.trace_syscalls = true; 5413 } 5414 /* 5415 * Now that we have --verbose figured out, lets see if we need to parse 5416 * events from .perfconfig, so that if those events fail parsing, say some 5417 * BPF program fails, then we'll be able to use --verbose to see what went 5418 * wrong in more detail. 5419 */ 5420 if (trace.perfconfig_events != NULL) { 5421 struct parse_events_error parse_err; 5422 5423 parse_events_error__init(&parse_err); 5424 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err); 5425 if (err) 5426 parse_events_error__print(&parse_err, trace.perfconfig_events); 5427 parse_events_error__exit(&parse_err); 5428 if (err) 5429 goto out; 5430 } 5431 5432 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) { 5433 usage_with_options_msg(trace_usage, trace_options, 5434 "cgroup monitoring only available in system-wide mode"); 5435 } 5436 5437 #ifdef HAVE_BPF_SKEL 5438 if (!trace.trace_syscalls) 5439 goto skip_augmentation; 5440 5441 if ((argc >= 1) && (strcmp(argv[0], "record") == 0)) { 5442 pr_debug("Syscall augmentation fails with record, disabling augmentation"); 5443 goto skip_augmentation; 5444 } 5445 5446 trace.skel = augmented_raw_syscalls_bpf__open(); 5447 if (!trace.skel) { 5448 pr_debug("Failed to open augmented syscalls BPF skeleton"); 5449 } else { 5450 /* 5451 * Disable attaching the BPF programs except for sys_enter and 5452 * sys_exit that tail call into this as necessary. 5453 */ 5454 struct bpf_program *prog; 5455 5456 bpf_object__for_each_program(prog, trace.skel->obj) { 5457 if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit) 5458 bpf_program__set_autoattach(prog, /*autoattach=*/false); 5459 } 5460 5461 err = augmented_raw_syscalls_bpf__load(trace.skel); 5462 5463 if (err < 0) { 5464 libbpf_strerror(err, bf, sizeof(bf)); 5465 pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf); 5466 } else { 5467 augmented_raw_syscalls_bpf__attach(trace.skel); 5468 trace__add_syscall_newtp(&trace); 5469 } 5470 } 5471 5472 err = bpf__setup_bpf_output(trace.evlist); 5473 if (err) { 5474 libbpf_strerror(err, bf, sizeof(bf)); 5475 pr_err("ERROR: Setup BPF output event failed: %s\n", bf); 5476 goto out; 5477 } 5478 trace.syscalls.events.bpf_output = evlist__last(trace.evlist); 5479 assert(evsel__name_is(trace.syscalls.events.bpf_output, "__augmented_syscalls__")); 5480 skip_augmentation: 5481 #endif 5482 err = -1; 5483 5484 if (trace.trace_pgfaults) { 5485 trace.opts.sample_address = true; 5486 trace.opts.sample_time = true; 5487 } 5488 5489 if (trace.opts.mmap_pages == UINT_MAX) 5490 mmap_pages_user_set = false; 5491 5492 if (trace.max_stack == UINT_MAX) { 5493 trace.max_stack = input_name ? 
PERF_MAX_STACK_DEPTH : sysctl__max_stack(); 5494 max_stack_user_set = false; 5495 } 5496 5497 #ifdef HAVE_DWARF_UNWIND_SUPPORT 5498 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) { 5499 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false); 5500 } 5501 #endif 5502 5503 if (callchain_param.enabled) { 5504 if (!mmap_pages_user_set && geteuid() == 0) 5505 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4; 5506 5507 symbol_conf.use_callchain = true; 5508 } 5509 5510 if (trace.evlist->core.nr_entries > 0) { 5511 bool use_btf = false; 5512 5513 evlist__set_default_evsel_handler(trace.evlist, trace__event_handler); 5514 if (evlist__set_syscall_tp_fields(trace.evlist, &use_btf)) { 5515 perror("failed to set syscalls:* tracepoint fields"); 5516 goto out; 5517 } 5518 5519 if (use_btf) 5520 trace__load_vmlinux_btf(&trace); 5521 } 5522 5523 if (trace.sort_events) { 5524 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace); 5525 ordered_events__set_copy_on_queue(&trace.oe.data, true); 5526 } 5527 5528 /* 5529 * If we are augmenting syscalls, then combine what we put in the 5530 * __augmented_syscalls__ BPF map with what is in the 5531 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF, 5532 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit. 5533 * 5534 * We'll switch to look at two BPF maps, one for sys_enter and the 5535 * other for sys_exit when we start augmenting the sys_exit paths with 5536 * buffers that are being copied from kernel to userspace, think 'read' 5537 * syscall. 5538 */ 5539 if (trace.syscalls.events.bpf_output) { 5540 evlist__for_each_entry(trace.evlist, evsel) { 5541 bool raw_syscalls_sys_exit = evsel__name_is(evsel, "raw_syscalls:sys_exit"); 5542 5543 if (raw_syscalls_sys_exit) { 5544 trace.raw_augmented_syscalls = true; 5545 goto init_augmented_syscall_tp; 5546 } 5547 5548 if (trace.syscalls.events.bpf_output->priv == NULL && 5549 strstr(evsel__name(evsel), "syscalls:sys_enter")) { 5550 struct evsel *augmented = trace.syscalls.events.bpf_output; 5551 if (evsel__init_augmented_syscall_tp(augmented, evsel) || 5552 evsel__init_augmented_syscall_tp_args(augmented)) 5553 goto out; 5554 /* 5555 * Augmented is the __augmented_syscalls__ BPF_OUTPUT event. 5556 * Above we made sure we can get from the payload the tp fields 5557 * that we get from the syscalls:sys_enter tracefs format file. 5558 */ 5559 augmented->handler = trace__sys_enter; 5560 /* 5561 * Now we do the same for the *syscalls:sys_enter event so that 5562 * if we handle it directly, i.e. if the BPF prog returns 0 so 5563 * as not to filter it, then we'll handle it just like we would 5564 * for the BPF_OUTPUT one: 5565 */ 5566 if (evsel__init_augmented_syscall_tp(evsel, evsel) || 5567 evsel__init_augmented_syscall_tp_args(evsel)) 5568 goto out; 5569 evsel->handler = trace__sys_enter; 5570 } 5571 5572 if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) { 5573 struct syscall_tp *sc; 5574 init_augmented_syscall_tp: 5575 if (evsel__init_augmented_syscall_tp(evsel, evsel)) 5576 goto out; 5577 sc = __evsel__syscall_tp(evsel); 5578 /* 5579 * For now with BPF raw_augmented we hook into 5580 * raw_syscalls:sys_enter and there we get all 5581 * 6 syscall args plus the tracepoint common 5582 * fields and the syscall_nr (another long). 5583 * So we check if that is the case and if so 5584 * don't look after the sc->args_size but 5585 * always after the full raw_syscalls:sys_enter 5586 * payload, which is fixed.
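 *
 * That fixed size is what gets stored below in
 * trace.raw_augmented_syscalls_args_size: sc->id.offset
 * (the common tp fields) plus (6 + 1) longs for the
 * syscall_nr and the six args.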
5587 * 5588 * We'll revisit this later to pass 5589 * sc->args_size to the BPF augmenter (now 5590 * tools/perf/examples/bpf/augmented_raw_syscalls.c), 5591 * so that it copies only what we need for each 5592 * syscall, like what happens when we use 5593 * syscalls:sys_enter_NAME, so that we reduce 5594 * the kernel/userspace traffic to just what is 5595 * needed for each syscall. 5596 */ 5597 if (trace.raw_augmented_syscalls) 5598 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset; 5599 evsel__init_augmented_syscall_tp_ret(evsel); 5600 evsel->handler = trace__sys_exit; 5601 } 5602 } 5603 } 5604 5605 if ((argc >= 1) && (strcmp(argv[0], "record") == 0)) 5606 return trace__record(&trace, argc-1, &argv[1]); 5607 5608 /* Using just --errno-summary will trigger --summary */ 5609 if (trace.errno_summary && !trace.summary && !trace.summary_only) 5610 trace.summary_only = true; 5611 5612 /* summary_only implies summary option, but don't overwrite summary if set */ 5613 if (trace.summary_only) 5614 trace.summary = trace.summary_only; 5615 5616 /* Keep exited threads, otherwise information might be lost for summary */ 5617 if (trace.summary) { 5618 symbol_conf.keep_exited_threads = true; 5619 if (trace.summary_mode == SUMMARY__NONE) 5620 trace.summary_mode = SUMMARY__BY_THREAD; 5621 } 5622 5623 if (output_name != NULL) { 5624 err = trace__open_output(&trace, output_name); 5625 if (err < 0) { 5626 perror("failed to create output file"); 5627 goto out; 5628 } 5629 } 5630 5631 err = evswitch__init(&trace.evswitch, trace.evlist, stderr); 5632 if (err) 5633 goto out_close; 5634 5635 err = target__validate(&trace.opts.target); 5636 if (err) { 5637 target__strerror(&trace.opts.target, err, bf, sizeof(bf)); 5638 fprintf(trace.output, "%s", bf); 5639 goto out_close; 5640 } 5641 5642 err = target__parse_uid(&trace.opts.target); 5643 if (err) { 5644 target__strerror(&trace.opts.target, err, bf, sizeof(bf)); 5645 fprintf(trace.output, "%s", bf); 5646 goto out_close; 5647 } 5648 5649 if (!argc && target__none(&trace.opts.target)) 5650 trace.opts.target.system_wide = true; 5651 5652 if (input_name) 5653 err = trace__replay(&trace); 5654 else 5655 err = trace__run(&trace, argc, argv); 5656 5657 out_close: 5658 if (output_name != NULL) 5659 fclose(trace.output); 5660 out: 5661 trace__exit(&trace); 5662 #ifdef HAVE_BPF_SKEL 5663 augmented_raw_syscalls_bpf__destroy(trace.skel); 5664 #endif 5665 return err; 5666 } 5667