/*
 * builtin-trace.c
 *
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system-wide, etc. Default format is loosely strace-like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 */

#include "util/record.h"
#include <api/fs/tracing_path.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#ifdef HAVE_BPF_SKEL
#include "bpf_skel/augmented_raw_syscalls.skel.h"
#endif
#endif
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/config.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/env.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/synthetic-events.h"
#include "util/evlist.h"
#include "util/evswitch.h"
#include "util/hashmap.h"
#include "util/mmap.h"
#include <subcmd/pager.h>
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/tool.h"
#include "util/trace.h"
#include "util/util.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/tracepoint.h"
#include "callchain.h"
#include "print_binary.h"
#include "string2.h"
#include "syscalltbl.h"
#include "../perf.h"
#include "trace_augment.h"
#include "dwarf-regs.h"

#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/list_sort.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <fcntl.h>
#include <sys/sysmacros.h>

#include <linux/ctype.h>
#include <perf/mmap.h>
#include <tools/libc_compat.h>

#ifdef HAVE_LIBTRACEEVENT
#include <event-parse.h>
#endif

#ifndef O_CLOEXEC
# define O_CLOEXEC 02000000
#endif

#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE 1024
#endif

#define RAW_SYSCALL_ARGS_NUM 6

/*
 * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
 *
 * We have to explicitly mark the direction of the flow of data, i.e. whether
 * it goes from the kernel to user space or the other way around. Since the
 * BPF collector we have so far copies only from user to kernel space, mark
 * the arguments that flow in that direction, so that we don't end up
 * collecting the previous contents for syscall args that go from kernel to
 * user space.
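 *
 * e.g. the buffer passed to write(2) flows from user to kernel space, so its
 * arg is marked with .from_user = true (see "write" in syscall_fmts[] below),
 * while the buffer filled in by read(2) flows the other way and must not be
 * collected at sys_enter time, lest we print its previous contents.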
 */
struct syscall_arg_fmt {
	size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	bool (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
	void *parm;
	const char *name;
	u16 nr_entries; // for arrays
	bool from_user;
	bool show_zero;
#ifdef HAVE_LIBBPF_SUPPORT
	const struct btf_type *type;
	int type_id; /* used in btf_dump */
#endif
};

struct syscall_fmt {
	const char *name;
	const char *alias;
	struct {
		const char *sys_enter,
			   *sys_exit;
	} bpf_prog_name;
	struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
	u8 nr_args;
	bool errpid;
	bool timeout;
	bool hexret;
};

struct trace {
	struct perf_tool tool;
	struct {
		/** Sorted syscall numbers used by the trace. */
		struct syscall **table;
		/** Size of table. */
		size_t table_size;
		struct {
			struct evsel *sys_enter,
				     *sys_exit,
				     *bpf_output;
		} events;
	} syscalls;
#ifdef HAVE_BPF_SKEL
	struct augmented_raw_syscalls_bpf *skel;
#endif
#ifdef HAVE_LIBBPF_SUPPORT
	struct btf *btf;
#endif
	struct record_opts opts;
	struct evlist *evlist;
	struct machine *host;
	struct thread *current;
	struct cgroup *cgroup;
	u64 base_time;
	FILE *output;
	unsigned long nr_events;
	unsigned long nr_events_printed;
	unsigned long max_events;
	struct evswitch evswitch;
	struct strlist *ev_qualifier;
	struct {
		size_t nr;
		int *entries;
	} ev_qualifier_ids;
	struct {
		size_t nr;
		pid_t *entries;
		struct bpf_map *map;
	} filter_pids;
	/*
	 * TODO: The map is from an ID (aka system call number) to struct
	 * syscall_stats. If there is >1 e_machine, such as i386 and x86-64
	 * processes, then the stats here will gather the wrong statistics for
	 * the non EM_HOST system calls. A fix would be to add the e_machine
	 * into the key, but this would make the code inconsistent with the
	 * per-thread version.
	 */
	struct hashmap *syscall_stats;
	double duration_filter;
	double runtime_ms;
	unsigned long pfmaj, pfmin;
	struct {
		u64 vfs_getname,
		    proc_getname;
	} stats;
	unsigned int max_stack;
	unsigned int min_stack;
	enum trace_summary_mode summary_mode;
	int raw_augmented_syscalls_args_size;
	bool raw_augmented_syscalls;
	bool fd_path_disabled;
	bool sort_events;
	bool not_ev_qualifier;
	bool live;
	bool full_time;
	bool sched;
	bool multiple_threads;
	bool summary;
	bool summary_only;
	bool errno_summary;
	bool failure_only;
	bool show_comm;
	bool print_sample;
	bool show_tool_stats;
	bool trace_syscalls;
	bool libtraceevent_print;
	bool kernel_syscallchains;
	s16 args_alignment;
	bool show_tstamp;
	bool show_duration;
	bool show_zeros;
	bool show_arg_names;
	bool show_string_prefix;
	bool force;
	bool vfs_getname;
	bool force_btf;
	bool summary_bpf;
	int trace_pgfaults;
	char *perfconfig_events;
	struct {
		struct ordered_events data;
		u64 last;
	} oe;
	const char *uid_str;
};

static void trace__load_vmlinux_btf(struct trace *trace __maybe_unused)
{
#ifdef HAVE_LIBBPF_SUPPORT
	if (trace->btf != NULL)
		return;

	trace->btf = btf__load_vmlinux_btf();
	if (verbose > 0) {
		fprintf(trace->output, trace->btf ?
"vmlinux BTF loaded\n" : 251 "Failed to load vmlinux BTF\n"); 252 } 253 #endif 254 } 255 256 struct tp_field { 257 int offset; 258 union { 259 u64 (*integer)(struct tp_field *field, struct perf_sample *sample); 260 void *(*pointer)(struct tp_field *field, struct perf_sample *sample); 261 }; 262 }; 263 264 #define TP_UINT_FIELD(bits) \ 265 static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \ 266 { \ 267 u##bits value; \ 268 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \ 269 return value; \ 270 } 271 272 TP_UINT_FIELD(8); 273 TP_UINT_FIELD(16); 274 TP_UINT_FIELD(32); 275 TP_UINT_FIELD(64); 276 277 #define TP_UINT_FIELD__SWAPPED(bits) \ 278 static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \ 279 { \ 280 u##bits value; \ 281 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \ 282 return bswap_##bits(value);\ 283 } 284 285 TP_UINT_FIELD__SWAPPED(16); 286 TP_UINT_FIELD__SWAPPED(32); 287 TP_UINT_FIELD__SWAPPED(64); 288 289 static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap) 290 { 291 field->offset = offset; 292 293 switch (size) { 294 case 1: 295 field->integer = tp_field__u8; 296 break; 297 case 2: 298 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16; 299 break; 300 case 4: 301 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32; 302 break; 303 case 8: 304 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64; 305 break; 306 default: 307 return -1; 308 } 309 310 return 0; 311 } 312 313 static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap) 314 { 315 return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap); 316 } 317 318 static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample) 319 { 320 return sample->raw_data + field->offset; 321 } 322 323 static int __tp_field__init_ptr(struct tp_field *field, int offset) 324 { 325 field->offset = offset; 326 field->pointer = tp_field__ptr; 327 return 0; 328 } 329 330 static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field) 331 { 332 return __tp_field__init_ptr(field, format_field->offset); 333 } 334 335 struct syscall_tp { 336 struct tp_field id; 337 union { 338 struct tp_field args, ret; 339 }; 340 }; 341 342 /* 343 * The evsel->priv as used by 'perf trace' 344 * sc: for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME 345 * fmt: for all the other tracepoints 346 */ 347 struct evsel_trace { 348 struct syscall_tp sc; 349 struct syscall_arg_fmt *fmt; 350 }; 351 352 static struct evsel_trace *evsel_trace__new(void) 353 { 354 return zalloc(sizeof(struct evsel_trace)); 355 } 356 357 static void evsel_trace__delete(struct evsel_trace *et) 358 { 359 if (et == NULL) 360 return; 361 362 zfree(&et->fmt); 363 free(et); 364 } 365 366 /* 367 * Used with raw_syscalls:sys_{enter,exit} and with the 368 * syscalls:sys_{enter,exit}_SYSCALL tracepoints 369 */ 370 static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel) 371 { 372 struct evsel_trace *et = evsel->priv; 373 374 return &et->sc; 375 } 376 377 static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel) 378 { 379 if (evsel->priv == NULL) { 380 evsel->priv = evsel_trace__new(); 381 if (evsel->priv == NULL) 382 return NULL; 383 } 384 385 return __evsel__syscall_tp(evsel); 386 } 387 388 /* 389 * Used with all the other tracepoints. 
 */
static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	return et->fmt;
}

static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	if (evsel->priv == NULL) {
		et = evsel->priv = evsel_trace__new();

		if (et == NULL)
			return NULL;
	}

	if (et->fmt == NULL) {
		const struct tep_event *tp_format = evsel__tp_format(evsel);

		if (tp_format == NULL)
			goto out_delete;

		et->fmt = calloc(tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
		if (et->fmt == NULL)
			goto out_delete;
	}

	return __evsel__syscall_arg_fmt(evsel);

out_delete:
	evsel_trace__delete(evsel->priv);
	evsel->priv = NULL;
	return NULL;
}

static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
	struct tep_format_field *format_field = evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_uint(field, format_field, evsel->needs_swap);
}

#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
	   evsel__init_tp_uint_field(evsel, &sc->name, #name); })

static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
	struct tep_format_field *format_field = evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
	   evsel__init_tp_ptr_field(evsel, &sc->name, #name); })

static void evsel__delete_priv(struct evsel *evsel)
{
	zfree(&evsel->priv);
	evsel__delete(evsel);
}

static int evsel__init_syscall_tp(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel__syscall_tp(evsel);

	if (sc != NULL) {
		if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
		    evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
			return -ENOENT;

		return 0;
	}

	return -ENOMEM;
}

static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
	struct syscall_tp *sc = evsel__syscall_tp(evsel);

	if (sc != NULL) {
		struct tep_format_field *syscall_id = evsel__field(tp, "id");

		if (syscall_id == NULL)
			syscall_id = evsel__field(tp, "__syscall_nr");
		if (syscall_id == NULL ||
		    __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
			return -EINVAL;

		return 0;
	}

	return -ENOMEM;
}

static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
	struct syscall_tp *sc = __evsel__syscall_tp(evsel);

	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}

static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
	struct syscall_tp *sc = __evsel__syscall_tp(evsel);

	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}

static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
	if (evsel__syscall_tp(evsel) != NULL) {
		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
			return -ENOENT;

		evsel->handler = handler;
		return 0;
	}

	return -ENOMEM;
}

static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
	struct evsel *evsel = evsel__newtp("raw_syscalls", direction);

	/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (evsel__init_raw_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	evsel__delete_priv(evsel);
	return NULL;
}

#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.pointer(&fields->name, sample); })

size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);

		if (show_suffix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
}

size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);

		if (show_prefix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", show_prefix ?
			 sa->prefix : "", sa->entries[idx]);
}

static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
						const char *intfmt,
						struct syscall_arg *arg)
{
	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
}

static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray

bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarray__strtoul(arg->parm, bf, size, ret);
}

bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarray__strtoul_flags(arg->parm, bf, size, ret);
}

bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarrays__strtoul(arg->parm, bf, size, ret);
}

size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
{
	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
}

size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	size_t printed;
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];
		int idx = val - sa->offset;

		if (idx >= 0 && idx < sa->nr_entries) {
			if (sa->entries[idx] == NULL)
				break;
			return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
		}
	}

	printed = scnprintf(bf, size, intfmt, val);
	if (show_prefix)
		printed += scnprintf(bf + printed, size - printed,
*/", sas->entries[0]->prefix); 633 return printed; 634 } 635 636 bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret) 637 { 638 int i; 639 640 for (i = 0; i < sa->nr_entries; ++i) { 641 if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') { 642 *ret = sa->offset + i; 643 return true; 644 } 645 } 646 647 return false; 648 } 649 650 bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret) 651 { 652 u64 val = 0; 653 char *tok = bf, *sep, *end; 654 655 *ret = 0; 656 657 while (size != 0) { 658 int toklen = size; 659 660 sep = memchr(tok, '|', size); 661 if (sep != NULL) { 662 size -= sep - tok + 1; 663 664 end = sep - 1; 665 while (end > tok && isspace(*end)) 666 --end; 667 668 toklen = end - tok + 1; 669 } 670 671 while (isspace(*tok)) 672 ++tok; 673 674 if (isalpha(*tok) || *tok == '_') { 675 if (!strarray__strtoul(sa, tok, toklen, &val)) 676 return false; 677 } else 678 val = strtoul(tok, NULL, 0); 679 680 *ret |= (1 << (val - 1)); 681 682 if (sep == NULL) 683 break; 684 tok = sep + 1; 685 } 686 687 return true; 688 } 689 690 bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret) 691 { 692 int i; 693 694 for (i = 0; i < sas->nr_entries; ++i) { 695 struct strarray *sa = sas->entries[i]; 696 697 if (strarray__strtoul(sa, bf, size, ret)) 698 return true; 699 } 700 701 return false; 702 } 703 704 size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size, 705 struct syscall_arg *arg) 706 { 707 return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val); 708 } 709 710 #ifndef AT_FDCWD 711 #define AT_FDCWD -100 712 #endif 713 714 static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size, 715 struct syscall_arg *arg) 716 { 717 int fd = arg->val; 718 const char *prefix = "AT_FD"; 719 720 if (fd == AT_FDCWD) 721 return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD"); 722 723 return syscall_arg__scnprintf_fd(bf, size, arg); 724 } 725 726 #define SCA_FDAT syscall_arg__scnprintf_fd_at 727 728 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size, 729 struct syscall_arg *arg); 730 731 #define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd 732 733 size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg) 734 { 735 return scnprintf(bf, size, "%#lx", arg->val); 736 } 737 738 size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg) 739 { 740 if (arg->val == 0) 741 return scnprintf(bf, size, "NULL"); 742 return syscall_arg__scnprintf_hex(bf, size, arg); 743 } 744 745 size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg) 746 { 747 return scnprintf(bf, size, "%d", arg->val); 748 } 749 750 size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg) 751 { 752 return scnprintf(bf, size, "%ld", arg->val); 753 } 754 755 static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg) 756 { 757 // XXX Hey, maybe for sched:sched_switch prev/next comm fields we can 758 // fill missing comms using thread__set_comm()... 759 // here or in a special syscall_arg__scnprintf_pid_sched_tp... 
	return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val);
}

#define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array

static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD", "OBJ_PIN", "OBJ_GET", "PROG_ATTACH",
	"PROG_DETACH", "PROG_TEST_RUN", "PROG_GET_NEXT_ID", "MAP_GET_NEXT_ID",
	"PROG_GET_FD_BY_ID", "MAP_GET_FD_BY_ID", "OBJ_GET_INFO_BY_FD",
	"PROG_QUERY", "RAW_TRACEPOINT_OPEN", "BTF_LOAD", "BTF_GET_FD_BY_ID",
	"TASK_FD_QUERY", "MAP_LOOKUP_AND_DELETE_ELEM", "MAP_FREEZE",
	"BTF_GET_NEXT_ID", "MAP_LOOKUP_BATCH", "MAP_LOOKUP_AND_DELETE_BATCH",
	"MAP_UPDATE_BATCH", "MAP_DELETE_BATCH", "LINK_CREATE", "LINK_UPDATE",
	"LINK_GET_FD_BY_ID", "LINK_GET_NEXT_ID", "ENABLE_STATS", "ITER_CREATE",
	"LINK_DETACH", "PROG_BIND_MAP",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");

static const char *fsmount_flags[] = {
	[1] = "CLOEXEC",
};
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");

#include "trace/beauty/generated/fsconfig_arrays.c"

static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");

static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers, "ITIMER_");

static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");

static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences, "SEEK_");

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
	"GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds, "F_");

static const char *fcntl_linux_specific_cmds[] = {
	"SETLEASE", "GETLEASE", "NOTIFY", "DUPFD_QUERY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};

static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);

static struct strarray *fcntl_cmds_arrays[] = {
	&strarray__fcntl_cmds,
	&strarray__fcntl_linux_specific_cmds,
};

static DEFINE_STRARRAYS(fcntl_cmds_arrays);

static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");

static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow, "SIG_");

static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI" 850 }; 851 static DEFINE_STRARRAY(clockid, "CLOCK_"); 852 853 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size, 854 struct syscall_arg *arg) 855 { 856 bool show_prefix = arg->show_string_prefix; 857 const char *suffix = "_OK"; 858 size_t printed = 0; 859 int mode = arg->val; 860 861 if (mode == F_OK) /* 0 */ 862 return scnprintf(bf, size, "F%s", show_prefix ? suffix : ""); 863 #define P_MODE(n) \ 864 if (mode & n##_OK) { \ 865 printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \ 866 mode &= ~n##_OK; \ 867 } 868 869 P_MODE(R); 870 P_MODE(W); 871 P_MODE(X); 872 #undef P_MODE 873 874 if (mode) 875 printed += scnprintf(bf + printed, size - printed, "|%#x", mode); 876 877 return printed; 878 } 879 880 #define SCA_ACCMODE syscall_arg__scnprintf_access_mode 881 882 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size, 883 struct syscall_arg *arg); 884 885 #define SCA_FILENAME syscall_arg__scnprintf_filename 886 887 // 'argname' is just documentational at this point, to remove the previous comment with that info 888 #define SCA_FILENAME_FROM_USER(argname) \ 889 { .scnprintf = SCA_FILENAME, \ 890 .from_user = true, } 891 892 static size_t syscall_arg__scnprintf_buf(char *bf, size_t size, struct syscall_arg *arg); 893 894 #define SCA_BUF syscall_arg__scnprintf_buf 895 896 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size, 897 struct syscall_arg *arg) 898 { 899 bool show_prefix = arg->show_string_prefix; 900 const char *prefix = "O_"; 901 int printed = 0, flags = arg->val; 902 903 #define P_FLAG(n) \ 904 if (flags & O_##n) { \ 905 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ 906 flags &= ~O_##n; \ 907 } 908 909 P_FLAG(CLOEXEC); 910 P_FLAG(NONBLOCK); 911 #undef P_FLAG 912 913 if (flags) 914 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); 915 916 return printed; 917 } 918 919 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags 920 921 #ifndef GRND_NONBLOCK 922 #define GRND_NONBLOCK 0x0001 923 #endif 924 #ifndef GRND_RANDOM 925 #define GRND_RANDOM 0x0002 926 #endif 927 928 static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size, 929 struct syscall_arg *arg) 930 { 931 bool show_prefix = arg->show_string_prefix; 932 const char *prefix = "GRND_"; 933 int printed = 0, flags = arg->val; 934 935 #define P_FLAG(n) \ 936 if (flags & GRND_##n) { \ 937 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ 938 flags &= ~GRND_##n; \ 939 } 940 941 P_FLAG(RANDOM); 942 P_FLAG(NONBLOCK); 943 #undef P_FLAG 944 945 if (flags) 946 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? 
"|" : "", flags); 947 948 return printed; 949 } 950 951 #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags 952 953 #ifdef HAVE_LIBBPF_SUPPORT 954 static void syscall_arg_fmt__cache_btf_enum(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type) 955 { 956 int id; 957 958 type = strstr(type, "enum "); 959 if (type == NULL) 960 return; 961 962 type += 5; // skip "enum " to get the enumeration name 963 964 id = btf__find_by_name(btf, type); 965 if (id < 0) 966 return; 967 968 arg_fmt->type = btf__type_by_id(btf, id); 969 } 970 971 static bool syscall_arg__strtoul_btf_enum(char *bf, size_t size, struct syscall_arg *arg, u64 *val) 972 { 973 const struct btf_type *bt = arg->fmt->type; 974 struct btf *btf = arg->trace->btf; 975 struct btf_enum *be = btf_enum(bt); 976 977 for (int i = 0; i < btf_vlen(bt); ++i, ++be) { 978 const char *name = btf__name_by_offset(btf, be->name_off); 979 int max_len = max(size, strlen(name)); 980 981 if (strncmp(name, bf, max_len) == 0) { 982 *val = be->val; 983 return true; 984 } 985 } 986 987 return false; 988 } 989 990 static bool syscall_arg__strtoul_btf_type(char *bf, size_t size, struct syscall_arg *arg, u64 *val) 991 { 992 const struct btf_type *bt; 993 char *type = arg->type_name; 994 struct btf *btf; 995 996 trace__load_vmlinux_btf(arg->trace); 997 998 btf = arg->trace->btf; 999 if (btf == NULL) 1000 return false; 1001 1002 if (arg->fmt->type == NULL) { 1003 // See if this is an enum 1004 syscall_arg_fmt__cache_btf_enum(arg->fmt, btf, type); 1005 } 1006 1007 // Now let's see if we have a BTF type resolved 1008 bt = arg->fmt->type; 1009 if (bt == NULL) 1010 return false; 1011 1012 // If it is an enum: 1013 if (btf_is_enum(arg->fmt->type)) 1014 return syscall_arg__strtoul_btf_enum(bf, size, arg, val); 1015 1016 return false; 1017 } 1018 1019 static size_t btf_enum_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, int val) 1020 { 1021 struct btf_enum *be = btf_enum(type); 1022 const int nr_entries = btf_vlen(type); 1023 1024 for (int i = 0; i < nr_entries; ++i, ++be) { 1025 if (be->val == val) { 1026 return scnprintf(bf, size, "%s", 1027 btf__name_by_offset(btf, be->name_off)); 1028 } 1029 } 1030 1031 return 0; 1032 } 1033 1034 struct trace_btf_dump_snprintf_ctx { 1035 char *bf; 1036 size_t printed, size; 1037 }; 1038 1039 static void trace__btf_dump_snprintf(void *vctx, const char *fmt, va_list args) 1040 { 1041 struct trace_btf_dump_snprintf_ctx *ctx = vctx; 1042 1043 ctx->printed += vscnprintf(ctx->bf + ctx->printed, ctx->size - ctx->printed, fmt, args); 1044 } 1045 1046 static size_t btf_struct_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, struct syscall_arg *arg) 1047 { 1048 struct trace_btf_dump_snprintf_ctx ctx = { 1049 .bf = bf, 1050 .size = size, 1051 }; 1052 struct augmented_arg *augmented_arg = arg->augmented.args; 1053 int type_id = arg->fmt->type_id, consumed; 1054 struct btf_dump *btf_dump; 1055 1056 LIBBPF_OPTS(btf_dump_opts, dump_opts); 1057 LIBBPF_OPTS(btf_dump_type_data_opts, dump_data_opts); 1058 1059 if (arg == NULL || arg->augmented.args == NULL) 1060 return 0; 1061 1062 dump_data_opts.compact = true; 1063 dump_data_opts.skip_names = !arg->trace->show_arg_names; 1064 1065 btf_dump = btf_dump__new(btf, trace__btf_dump_snprintf, &ctx, &dump_opts); 1066 if (btf_dump == NULL) 1067 return 0; 1068 1069 /* pretty print the struct data here */ 1070 if (btf_dump__dump_type_data(btf_dump, type_id, arg->augmented.args->value, type->size, &dump_data_opts) == 0) 1071 
		return 0;
	}

	consumed = sizeof(*augmented_arg) + augmented_arg->size;
	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	btf_dump__free(btf_dump);

	return ctx.printed;
}

static size_t trace__btf_scnprintf(struct trace *trace, struct syscall_arg *arg, char *bf,
				   size_t size, int val, char *type)
{
	struct syscall_arg_fmt *arg_fmt = arg->fmt;

	if (trace->btf == NULL)
		return 0;

	if (arg_fmt->type == NULL) {
		// Check if this is an enum and if we have the BTF type for it.
		syscall_arg_fmt__cache_btf_enum(arg_fmt, trace->btf, type);
	}

	// Did we manage to find a BTF type for the syscall/tracepoint argument?
	if (arg_fmt->type == NULL)
		return 0;

	if (btf_is_enum(arg_fmt->type))
		return btf_enum_scnprintf(arg_fmt->type, trace->btf, bf, size, val);
	else if (btf_is_struct(arg_fmt->type) || btf_is_union(arg_fmt->type))
		return btf_struct_scnprintf(arg_fmt->type, trace->btf, bf, size, arg);

	return 0;
}

#else // HAVE_LIBBPF_SUPPORT
static size_t trace__btf_scnprintf(struct trace *trace __maybe_unused, struct syscall_arg *arg __maybe_unused,
				   char *bf __maybe_unused, size_t size __maybe_unused, int val __maybe_unused,
				   char *type __maybe_unused)
{
	return 0;
}

static bool syscall_arg__strtoul_btf_type(char *bf __maybe_unused, size_t size __maybe_unused,
					  struct syscall_arg *arg __maybe_unused, u64 *val __maybe_unused)
{
	return false;
}
#endif // HAVE_LIBBPF_SUPPORT

#define STUL_BTF_TYPE syscall_arg__strtoul_btf_type

#define STRARRAY(name, array) \
	{ .scnprintf = SCA_STRARRAY, \
	  .strtoul = STUL_STRARRAY, \
	  .parm = &strarray__##array, \
	  .show_zero = true, }

#define STRARRAY_FLAGS(name, array) \
	{ .scnprintf = SCA_STRARRAY_FLAGS, \
	  .strtoul = STUL_STRARRAY_FLAGS, \
	  .parm = &strarray__##array, \
	  .show_zero = true, }

#include "trace/beauty/eventfd.c"
#include "trace/beauty/futex_op.c"
#include "trace/beauty/futex_val3.c"
#include "trace/beauty/mmap.c"
#include "trace/beauty/mode_t.c"
#include "trace/beauty/msg_flags.c"
#include "trace/beauty/open_flags.c"
#include "trace/beauty/perf_event_open.c"
#include "trace/beauty/pid.c"
#include "trace/beauty/sched_policy.c"
#include "trace/beauty/seccomp.c"
#include "trace/beauty/signum.c"
#include "trace/beauty/socket_type.c"
#include "trace/beauty/waitid_options.c"

static const struct syscall_fmt syscall_fmts[] = {
	{ .name = "access",
	  .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
	{ .name = "arch_prctl",
	  .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
		   [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
	{ .name = "bind",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = SCA_SOCKADDR_FROM_USER(umyaddr),
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name = "bpf",
	  .arg = { [0] = STRARRAY(cmd, bpf_cmd),
		   [1] = { .from_user = true /* attr */, }, } },
	{ .name = "brk", .hexret = true,
	  .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
	{ .name = "clock_gettime",
	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
	{ .name = "clock_nanosleep",
	  .arg = { [2] = SCA_TIMESPEC_FROM_USER(req), }, },
	{ .name = "clone",
	  .errpid = true, .nr_args = 5,
	  .arg = { [0] = { .name = "flags",	    .scnprintf = SCA_CLONE_FLAGS, },
		   [1] = { .name = "child_stack",   .scnprintf = SCA_HEX, },
		   [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
		   [3] = { .name = "child_tidptr",  .scnprintf = SCA_HEX, },
		   [4] = { .name = "tls",	    .scnprintf = SCA_HEX, }, }, },
	{ .name = "close",
	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
	{ .name = "connect",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = SCA_SOCKADDR_FROM_USER(servaddr),
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name = "epoll_ctl",
	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
	{ .name = "eventfd2",
	  .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
	{ .name = "faccessat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ },
		   [1] = SCA_FILENAME_FROM_USER(pathname),
		   [2] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
	{ .name = "faccessat2",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ },
		   [1] = SCA_FILENAME_FROM_USER(pathname),
		   [2] = { .scnprintf = SCA_ACCMODE, /* mode */ },
		   [3] = { .scnprintf = SCA_FACCESSAT2_FLAGS, /* flags */ }, }, },
	{ .name = "fchmodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "fchownat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "fcntl",
	  .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
			   .strtoul = STUL_STRARRAYS,
			   .parm = &strarrays__fcntl_cmds_arrays,
			   .show_zero = true, },
		   [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
	{ .name = "flock",
	  .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
	{ .name = "fsconfig",
	  .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
	{ .name = "fsmount",
	  .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
		   [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
	{ .name = "fspick",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [1] = SCA_FILENAME_FROM_USER(path),
		   [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
	{ .name = "fstat", .alias = "newfstat", },
	{ .name = "futex",
	  .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
		   [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
	{ .name = "futimesat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "getitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name = "getpid", .errpid = true, },
	{ .name = "getpgid", .errpid = true, },
	{ .name = "getppid", .errpid = true, },
	{ .name = "getrandom",
	  .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
	{ .name = "getrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name = "getsockopt",
	  .arg = { [1] = STRARRAY(level, socket_level), }, },
	{ .name = "gettid", .errpid = true, },
	{ .name = "ioctl",
	  .arg = {
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches.
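 * On the other arches only the third argument ('arg') gets beautified, as
 * hex, see the #else branch below.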
 */
		   [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#else
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#endif
	{ .name = "kcmp", .nr_args = 5,
	  .arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, },
		   [1] = { .name = "pid2", .scnprintf = SCA_PID, },
		   [2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, },
		   [3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, },
		   [4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, },
	{ .name = "keyctl",
	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
	{ .name = "kill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "linkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "lseek",
	  .arg = { [2] = STRARRAY(whence, whences), }, },
	{ .name = "lstat", .alias = "newlstat", },
	{ .name = "madvise",
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
		   [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
	{ .name = "mkdirat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "mknodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "mmap", .hexret = true,
	/* The standard mmap maps to old_mmap on s390x */
#if defined(__s390x__)
	.alias = "old_mmap",
#endif
	  .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ },
		   [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */
			   .strtoul = STUL_STRARRAY_FLAGS,
			   .parm = &strarray__mmap_flags, },
		   [5] = { .scnprintf = SCA_HEX, /* offset */ }, }, },
	{ .name = "mount",
	  .arg = { [0] = SCA_FILENAME_FROM_USER(devname),
		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
			   .mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name = "move_mount",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ },
		   [1] = SCA_FILENAME_FROM_USER(pathname),
		   [2] = { .scnprintf = SCA_FDAT, /* to_dfd */ },
		   [3] = SCA_FILENAME_FROM_USER(pathname),
		   [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name = "mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ }, }, },
	{ .name = "mq_unlink",
	  .arg = { [0] = SCA_FILENAME_FROM_USER(u_name), }, },
	{ .name = "mremap", .hexret = true,
	  .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
	{ .name = "name_to_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "nanosleep",
	  .arg = { [0] = SCA_TIMESPEC_FROM_USER(req), }, },
	{ .name = "newfstatat", .alias = "fstatat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ },
		   [1] = SCA_FILENAME_FROM_USER(pathname),
		   [3] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, },
	{ .name = "open",
	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name = "open_by_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name = "openat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name = "perf_event_open",
	  .arg = { [0] = SCA_PERF_ATTR_FROM_USER(attr),
		   [2] = { .scnprintf = SCA_INT, /* cpu */ },
		   [3] = { .scnprintf = SCA_FD, /* group_fd */ },
		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
	{ .name = "pipe2",
	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
	{ .name = "pkey_alloc",
	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS, /* access_rights */ }, }, },
	{ .name = "pkey_free",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* key */ }, }, },
	{ .name = "pkey_mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ },
		   [3] = { .scnprintf = SCA_INT, /* pkey */ }, }, },
	{ .name = "poll", .timeout = true, },
	{ .name = "ppoll", .timeout = true, },
	{ .name = "prctl",
	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */
			   .strtoul = STUL_STRARRAY,
			   .parm = &strarray__prctl_options, },
		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
	{ .name = "pread", .alias = "pread64", },
	{ .name = "preadv", .alias = "pread", },
	{ .name = "prlimit64",
	  .arg = { [1] = STRARRAY(resource, rlimit_resources),
		   [2] = { .from_user = true /* new_rlim */, }, }, },
	{ .name = "pwrite", .alias = "pwrite64", },
	{ .name = "readlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "recvfrom",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "recvmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "recvmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "renameat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
	{ .name = "renameat2",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
	{ .name = "rseq",
	  .arg = { [0] = { .from_user = true /* rseq */, }, }, },
	{ .name = "rt_sigaction",
	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "rt_sigprocmask",
	  .arg = { [0] = STRARRAY(how, sighow), }, },
	{ .name = "rt_sigqueueinfo",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "rt_tgsigqueueinfo",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "sched_setscheduler",
	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
	{ .name = "seccomp",
	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP, /* op */ },
		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
	{ .name = "select", .timeout = true, },
	{ .name = "sendfile", .alias = "sendfile64", },
	{ .name = "sendmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "sendmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "sendto",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
		   [4] = SCA_SOCKADDR_FROM_USER(addr), }, },
	{ .name = "set_robust_list",
	  .arg = { [0] = { .from_user = true /* head */, }, }, },
	{ .name = "set_tid_address", .errpid = true, },
	{ .name = "setitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name = "setrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources),
		   [1] = { .from_user = true /* rlim */, }, }, },
	{ .name = "setsockopt",
	  .arg = { [1] = STRARRAY(level, socket_level), }, },
	{ .name = "socket",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name = "socketpair",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name = "stat", .alias = "newstat", },
	{ .name = "statx",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ },
		   [2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ },
		   [3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, },
	{ .name = "swapoff",
	  .arg = { [0] = SCA_FILENAME_FROM_USER(specialfile), }, },
	{ .name = "swapon",
	  .arg = { [0] = SCA_FILENAME_FROM_USER(specialfile), }, },
	{ .name = "symlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "sync_file_range",
	  .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
	{ .name = "tgkill",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "tkill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "umount2", .alias = "umount",
	  .arg = { [0] = SCA_FILENAME_FROM_USER(name), }, },
	{ .name = "uname", .alias = "newuname", },
	{ .name = "unlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [1] = SCA_FILENAME_FROM_USER(pathname),
		   [2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, },
	{ .name = "utimensat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
	{ .name = "wait4", .errpid = true,
	  .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
	{ .name = "waitid", .errpid = true,
	  .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
	{ .name = "write",
	  .arg = { [1] = { .scnprintf = SCA_BUF /* buf */, .from_user = true, }, }, },
};

static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;

	return strcmp(name, fmt->name);
}

static const struct syscall_fmt *__syscall_fmt__find(const struct syscall_fmt *fmts,
						     const int nmemb,
						     const char *name)
{
	return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

static const struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);

	return __syscall_fmt__find(syscall_fmts, nmemb, name);
}

static const struct syscall_fmt *__syscall_fmt__find_by_alias(const struct syscall_fmt *fmts,
							      const int nmemb, const char *alias)
{
	int i;

	for (i = 0; i < nmemb; ++i) {
		if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0)
			return &fmts[i];
	}

	return NULL;
}

static const struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);

	return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias);
}

/**
 * struct syscall
 */
struct syscall {
	/** @e_machine: The ELF machine associated with the entry. */
	int e_machine;
	/** @id: id value from the tracepoint, the system call number.
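	 * Note: the number alone is ambiguous when tracing more than one
	 * e_machine, e.g. i386 and x86-64 tasks, hence the @e_machine member
	 * above.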
	 */
	int id;
	struct tep_event *tp_format;
	int nr_args;
	/**
	 * @args_size: sum of the sizes of the syscall arguments, anything
	 * after that is augmented stuff: pathname for openat, etc.
	 */

	int args_size;
	struct {
		struct bpf_program *sys_enter,
				   *sys_exit;
	} bpf_prog;
	/** @is_exit: is this "exit" or "exit_group"? */
	bool is_exit;
	/**
	 * @is_open: is this "open" or "openat"? To associate the fd returned in
	 * sys_exit with the pathname in sys_enter.
	 */
	bool is_open;
	/**
	 * @nonexistent: Name lookup failed. Just a hole in the syscall table,
	 * syscall id not allocated.
	 */
	bool nonexistent;
	bool use_btf;
	struct tep_format_field *args;
	const char *name;
	const struct syscall_fmt *fmt;
	struct syscall_arg_fmt *arg_fmt;
};

/*
 * We need this 'calculated' boolean because in some cases we really don't
 * know the duration of a syscall, for instance, when we start a session and
 * some threads are waiting for a syscall to finish, say 'poll', in which case
 * all we can do is to print "( ? )" for the duration and for the start
 * timestamp.
 */
static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (!calculated)
		printed += fprintf(fp, "         ");
	else if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);

	return printed + fprintf(fp, "): ");
}

/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
 * ret_scnprintf: syscall args may set this to a different syscall return
 *                formatter, for instance, fcntl may return fds, file flags, etc.
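 *                The formatter is installed per-thread via
 *                syscall_arg__set_ret_scnprintf() below.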
1536 */ 1537 struct thread_trace { 1538 u64 entry_time; 1539 bool entry_pending; 1540 unsigned long nr_events; 1541 unsigned long pfmaj, pfmin; 1542 char *entry_str; 1543 double runtime_ms; 1544 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg); 1545 struct { 1546 unsigned long ptr; 1547 short int entry_str_pos; 1548 bool pending_open; 1549 unsigned int namelen; 1550 char *name; 1551 } filename; 1552 struct { 1553 int max; 1554 struct file *table; 1555 } files; 1556 1557 struct hashmap *syscall_stats; 1558 }; 1559 1560 static size_t syscall_id_hash(long key, void *ctx __maybe_unused) 1561 { 1562 return key; 1563 } 1564 1565 static bool syscall_id_equal(long key1, long key2, void *ctx __maybe_unused) 1566 { 1567 return key1 == key2; 1568 } 1569 1570 static struct hashmap *alloc_syscall_stats(void) 1571 { 1572 return hashmap__new(syscall_id_hash, syscall_id_equal, NULL); 1573 } 1574 1575 static void delete_syscall_stats(struct hashmap *syscall_stats) 1576 { 1577 struct hashmap_entry *pos; 1578 size_t bkt; 1579 1580 if (syscall_stats == NULL) 1581 return; 1582 1583 hashmap__for_each_entry(syscall_stats, pos, bkt) 1584 zfree(&pos->pvalue); 1585 hashmap__free(syscall_stats); 1586 } 1587 1588 static struct thread_trace *thread_trace__new(struct trace *trace) 1589 { 1590 struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace)); 1591 1592 if (ttrace) { 1593 ttrace->files.max = -1; 1594 if (trace->summary) { 1595 ttrace->syscall_stats = alloc_syscall_stats(); 1596 if (IS_ERR(ttrace->syscall_stats)) 1597 zfree(&ttrace); 1598 } 1599 } 1600 1601 return ttrace; 1602 } 1603 1604 static void thread_trace__free_files(struct thread_trace *ttrace); 1605 1606 static void thread_trace__delete(void *pttrace) 1607 { 1608 struct thread_trace *ttrace = pttrace; 1609 1610 if (!ttrace) 1611 return; 1612 1613 delete_syscall_stats(ttrace->syscall_stats); 1614 ttrace->syscall_stats = NULL; 1615 thread_trace__free_files(ttrace); 1616 zfree(&ttrace->entry_str); 1617 free(ttrace); 1618 } 1619 1620 static struct thread_trace *thread__trace(struct thread *thread, struct trace *trace) 1621 { 1622 struct thread_trace *ttrace; 1623 1624 if (thread == NULL) 1625 goto fail; 1626 1627 if (thread__priv(thread) == NULL) 1628 thread__set_priv(thread, thread_trace__new(trace)); 1629 1630 if (thread__priv(thread) == NULL) 1631 goto fail; 1632 1633 ttrace = thread__priv(thread); 1634 ++ttrace->nr_events; 1635 1636 return ttrace; 1637 fail: 1638 color_fprintf(trace->output, PERF_COLOR_RED, 1639 "WARNING: not enough memory, dropping samples!\n"); 1640 return NULL; 1641 } 1642 1643 1644 void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg, 1645 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg)) 1646 { 1647 struct thread_trace *ttrace = thread__priv(arg->thread); 1648 1649 ttrace->ret_scnprintf = ret_scnprintf; 1650 } 1651 1652 #define TRACE_PFMAJ (1 << 0) 1653 #define TRACE_PFMIN (1 << 1) 1654 1655 static const size_t trace__entry_str_size = 2048; 1656 1657 static void thread_trace__free_files(struct thread_trace *ttrace) 1658 { 1659 for (int i = 0; i <= ttrace->files.max; ++i) { 1660 struct file *file = ttrace->files.table + i; 1661 zfree(&file->pathname); 1662 } 1663 1664 zfree(&ttrace->files.table); 1665 ttrace->files.max = -1; 1666 } 1667 1668 static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd) 1669 { 1670 if (fd < 0) 1671 return NULL; 1672 1673 if (fd > ttrace->files.max) { 1674 struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * 
					      sizeof(struct file));

		if (nfiles == NULL)
			return NULL;

		if (ttrace->files.max != -1) {
			memset(nfiles + ttrace->files.max + 1, 0,
			       (fd - ttrace->files.max) * sizeof(struct file));
		} else {
			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
		}

		ttrace->files.table = nfiles;
		ttrace->files.max = fd;
	}

	return ttrace->files.table + fd;
}

struct file *thread__files_entry(struct thread *thread, int fd)
{
	return thread_trace__files_entry(thread__priv(thread), fd);
}

static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
{
	struct thread_trace *ttrace = thread__priv(thread);
	struct file *file = thread_trace__files_entry(ttrace, fd);

	if (file != NULL) {
		struct stat st;

		if (stat(pathname, &st) == 0)
			file->dev_maj = major(st.st_rdev);
		file->pathname = strdup(pathname);
		if (file->pathname)
			return 0;
	}

	return -1;
}

static int thread__read_fd_path(struct thread *thread, int fd)
{
	char linkname[PATH_MAX], pathname[PATH_MAX];
	struct stat st;
	int ret;

	if (thread__pid(thread) == thread__tid(thread)) {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/fd/%d", thread__pid(thread), fd);
	} else {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/task/%d/fd/%d",
			  thread__pid(thread), thread__tid(thread), fd);
	}

	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
		return -1;

	ret = readlink(linkname, pathname, sizeof(pathname));

	if (ret < 0 || ret > st.st_size)
		return -1;

	pathname[ret] = '\0';
	return trace__set_fd_pathname(thread, fd, pathname);
}

static const char *thread__fd_path(struct thread *thread, int fd,
				   struct trace *trace)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (ttrace == NULL || trace->fd_path_disabled)
		return NULL;

	if (fd < 0)
		return NULL;

	if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
		if (!trace->live)
			return NULL;
		++trace->stats.proc_getname;
		if (thread__read_fd_path(thread, fd))
			return NULL;
	}

	return ttrace->files.table[fd].pathname;
}

size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = scnprintf(bf, size, "%d", fd);
	const char *path = thread__fd_path(arg->thread, fd, arg->trace);

	if (path)
		printed += scnprintf(bf + printed, size - printed, "<%s>", path);

	return printed;
}

size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
{
	size_t printed = scnprintf(bf, size, "%d", fd);
	struct thread *thread = machine__find_thread(trace->host, pid, pid);

	if (thread) {
		const char *path = thread__fd_path(thread, fd, trace);

		if (path)
			printed += scnprintf(bf + printed, size - printed, "<%s>", path);

		thread__put(thread);
	}

	return printed;
}

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
	struct thread_trace *ttrace = thread__priv(arg->thread);

	if (ttrace && fd >= 0 && fd <=
static void thread__set_filename_pos(struct thread *thread, const char *bf,
				     unsigned long ptr)
{
	struct thread_trace *ttrace = thread__priv(thread);

	ttrace->filename.ptr = ptr;
	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
}

static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
	struct augmented_arg *augmented_arg = arg->augmented.args;
	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
	/*
	 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
	 * we would have two strings, each prefixed by its size.
	 */
	int consumed = sizeof(*augmented_arg) + augmented_arg->size;

	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	return printed;
}

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	unsigned long ptr = arg->val;

	if (arg->augmented.args)
		return syscall_arg__scnprintf_augmented_string(arg, bf, size);

	if (!arg->trace->vfs_getname)
		return scnprintf(bf, size, "%#x", ptr);

	thread__set_filename_pos(arg->thread, bf, ptr);
	return 0;
}

#define MAX_CONTROL_CHAR 31
#define MAX_ASCII 127

static size_t syscall_arg__scnprintf_buf(char *bf, size_t size, struct syscall_arg *arg)
{
	struct augmented_arg *augmented_arg = arg->augmented.args;
	unsigned char *orig = (unsigned char *)augmented_arg->value;
	size_t printed = 0;
	int consumed;

	if (augmented_arg == NULL)
		return 0;

	for (int j = 0; j < augmented_arg->size; ++j) {
		bool control_char = orig[j] <= MAX_CONTROL_CHAR || orig[j] >= MAX_ASCII;
		/* print control characters (0~31 and 127), and non-ascii characters in \(digits) */
		printed += scnprintf(bf + printed, size - printed, control_char ? "\\%d" : "%c", (int)orig[j]);
	}

	consumed = sizeof(*augmented_arg) + augmented_arg->size;
	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	return printed;
}

static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;

	return fprintf(fp, "%10.3f ", ts);
}

/*
 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
 * using ttrace->entry_time for a thread that receives a sys_exit without
 * first having received a sys_enter ("poll" issued before the tracing
 * session starts, or a sys_enter lost due to ring buffer overflow).
 */
static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	if (tstamp > 0)
		return __trace__fprintf_tstamp(trace, tstamp, fp);

	return fprintf(fp, "         ? ");
}

static pid_t workload_pid = -1;
static volatile sig_atomic_t done = false;
static volatile sig_atomic_t interrupted = false;

static void sighandler_interrupt(int sig __maybe_unused)
{
	done = interrupted = true;
}

static void sighandler_chld(int sig __maybe_unused, siginfo_t *info,
			    void *context __maybe_unused)
{
	if (info->si_pid == workload_pid)
		done = true;
}

static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
{
	size_t printed = 0;

	if (trace->multiple_threads) {
		if (trace->show_comm)
			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
		printed += fprintf(fp, "%d ", thread__tid(thread));
	}

	return printed;
}

static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
{
	size_t printed = 0;

	if (trace->show_tstamp)
		printed = trace__fprintf_tstamp(trace, tstamp, fp);
	if (trace->show_duration)
		printed += fprintf_duration(duration, duration_calculated, fp);
	return printed + trace__fprintf_comm_tid(trace, thread, fp);
}

static int trace__process_event(struct trace *trace, struct machine *machine,
				union perf_event *event, struct perf_sample *sample)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(trace->output, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", (u64)event->lost.lost);
		ret = machine__process_lost_event(machine, event, sample);
		break;
	default:
		ret = machine__process_event(machine, event, sample);
		break;
	}

	return ret;
}

static int trace__tool_process(const struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	return trace__process_event(trace, machine, event, sample);
}

static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;

	if (machine->kptr_restrict_warned)
		return NULL;

	if (symbol_conf.kptr_restrict) {
		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
			   "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
			   "Kernel samples will not be resolved.\n");
		machine->kptr_restrict_warned = true;
		return NULL;
	}

	return machine__resolve_kernel_addr(vmachine, addrp, modp);
}

static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
{
	int err = symbol__init(NULL);

	if (err)
		return err;

	trace->host = machine__new_host();
	if (trace->host == NULL)
		return -ENOMEM;

	thread__set_priv_destructor(thread_trace__delete);

	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
	if (err < 0)
		goto out;

	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
					    evlist->core.threads, trace__tool_process,
					    true, false, 1);
out:
	if (err)
		symbol__exit();

	return err;
}
static void trace__symbols__exit(struct trace *trace)
{
	machine__exit(trace->host);
	trace->host = NULL;

	symbol__exit();
}

static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
{
	int idx;

	if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0)
		nr_args = sc->fmt->nr_args;

	sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
	if (sc->arg_fmt == NULL)
		return -1;

	for (idx = 0; idx < nr_args; ++idx) {
		if (sc->fmt)
			sc->arg_fmt[idx] = sc->fmt->arg[idx];
	}

	sc->nr_args = nr_args;
	return 0;
}

static const struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
	{ .name = "msr",	.scnprintf = SCA_X86_MSR,	  .strtoul = STUL_X86_MSR,	   },
	{ .name = "vector",	.scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, },
};

static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_arg_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static const struct syscall_arg_fmt *
__syscall_arg_fmt__find_by_name(const struct syscall_arg_fmt *fmts, const int nmemb,
				const char *name)
{
	return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
}

static const struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
	return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
}
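/*
 * Note (added commentary): syscall_arg_fmts__by_name above is searched with
 * bsearch(), so its entries must stay sorted by ->name when new ones are
 * added, e.g.:
 *
 *	{ .name = "msr",    ... },	// 'm' sorts before 'v'
 *	{ .name = "vector", ... },
 */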
static struct tep_format_field *
syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field,
			    bool *use_btf)
{
	struct tep_format_field *last_field = NULL;
	int len;

	for (; field; field = field->next, ++arg) {
		last_field = field;

		if (arg->scnprintf)
			continue;

		len = strlen(field->name);

		// As far as heuristics (or intention) goes this seems to hold true, and makes sense!
		if ((field->flags & TEP_FIELD_IS_POINTER) && strstarts(field->type, "const "))
			arg->from_user = true;

		if (strcmp(field->type, "const char *") == 0 &&
		    ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
		     strstr(field->name, "path") != NULL)) {
			arg->scnprintf = SCA_FILENAME;
		} else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
			arg->scnprintf = SCA_PTR;
		else if (strcmp(field->type, "pid_t") == 0)
			arg->scnprintf = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			arg->scnprintf = SCA_MODE_T;
		else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) {
			arg->scnprintf = SCA_CHAR_ARRAY;
			arg->nr_entries = field->arraylen;
		} else if ((strcmp(field->type, "int") == 0 ||
			    strcmp(field->type, "unsigned int") == 0 ||
			    strcmp(field->type, "long") == 0) &&
			   len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
			/*
			 * /sys/kernel/tracing/events/syscalls/sys_enter*
			 * grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
			 * 65 int
			 * 23 unsigned int
			 * 7 unsigned long
			 */
			arg->scnprintf = SCA_FD;
		} else if (strstr(field->type, "enum") && use_btf != NULL) {
			*use_btf = true;
			arg->strtoul = STUL_BTF_TYPE;
		} else {
			const struct syscall_arg_fmt *fmt =
				syscall_arg_fmt__find_by_name(field->name);

			if (fmt) {
				arg->scnprintf = fmt->scnprintf;
				arg->strtoul = fmt->strtoul;
			}
		}
	}

	return last_field;
}

static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args,
									  &sc->use_btf);

	if (last_field)
		sc->args_size = last_field->offset + last_field->size;

	return 0;
}
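/*
 * Illustrative example (added commentary): with the heuristics above, a
 * tracepoint field declared as 'const char *filename' matches the
 * "*name"/"path" rule and gets SCA_FILENAME, while 'unsigned int fd' ends
 * in "fd" and gets SCA_FD, so no per-syscall table entry is needed for the
 * common cases.
 */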
static int syscall__read_info(struct syscall *sc, struct trace *trace)
{
	char tp_name[128];
	const char *name;
	int err;

	if (sc->nonexistent)
		return -EEXIST;

	if (sc->name) {
		/* Info already read. */
		return 0;
	}

	name = syscalltbl__name(sc->e_machine, sc->id);
	if (name == NULL) {
		sc->nonexistent = true;
		return -EEXIST;
	}

	sc->name = name;
	sc->fmt = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
	}

	/*
	 * Failed to read the tracepoint format via the sysfs node, so the
	 * tracepoint doesn't exist. Set the 'nonexistent' flag to true.
	 */
	if (IS_ERR(sc->tp_format)) {
		sc->nonexistent = true;
		err = PTR_ERR(sc->tp_format);
		sc->tp_format = NULL;
		return err;
	}

	/*
	 * The tracepoint format contains a __syscall_nr field, so it has one
	 * more field than the actual number of syscall arguments.
	 */
	if (syscall__alloc_arg_fmts(sc, sc->tp_format->format.nr_fields - 1))
		return -ENOMEM;

	sc->args = sc->tp_format->format.fields;
	/*
	 * We need to check for and discard the first field, '__syscall_nr' or
	 * 'nr', which holds the syscall number and is redundant here. Note
	 * that it does not exist on older kernels.
	 */
	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
		sc->args = sc->args->next;
		--sc->nr_args;
	}

	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
	sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");

	err = syscall__set_arg_fmts(sc);

	/* After calling syscall__set_arg_fmts() we'll know whether use_btf is true. */
	if (sc->use_btf)
		trace__load_vmlinux_btf(trace);

	return err;
}

static int evsel__init_tp_arg_scnprintf(struct evsel *evsel, bool *use_btf)
{
	struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);

	if (fmt != NULL) {
		const struct tep_event *tp_format = evsel__tp_format(evsel);

		if (tp_format) {
			syscall_arg_fmt__init_array(fmt, tp_format->format.fields, use_btf);
			return 0;
		}
	}

	return -ENOMEM;
}

static int intcmp(const void *a, const void *b)
{
	const int *one = a, *another = b;

	return *one - *another;
}
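/*
 * Illustrative note (added commentary): the qualifier list validated below
 * comes from the -e option, and globs are expanded against the syscall
 * table, so e.g. 'perf trace -e open*' traces open, openat, openat2, ...
 * with the matching ids gathered and then sorted for later bsearch() use.
 */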
static int trace__validate_ev_qualifier(struct trace *trace)
{
	int err = 0;
	bool printed_invalid_prefix = false;
	struct str_node *pos;
	size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);

	trace->ev_qualifier_ids.entries = malloc(nr_allocated *
						 sizeof(trace->ev_qualifier_ids.entries[0]));

	if (trace->ev_qualifier_ids.entries == NULL) {
		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
		      trace->output);
		err = -EINVAL;
		goto out;
	}

	strlist__for_each_entry(pos, trace->ev_qualifier) {
		const char *sc = pos->s;
		/*
		 * TODO: this assumes that all the syscalls to validate/warn
		 * about are for the same binary type (e_machine) as perf
		 * itself.
		 */
		int id = syscalltbl__id(EM_HOST, sc), match_next = -1;

		if (id < 0) {
			id = syscalltbl__strglobmatch_first(EM_HOST, sc, &match_next);
			if (id >= 0)
				goto matches;

			if (!printed_invalid_prefix) {
				pr_debug("Skipping unknown syscalls: ");
				printed_invalid_prefix = true;
			} else {
				pr_debug(", ");
			}

			pr_debug("%s", sc);
			continue;
		}
matches:
		trace->ev_qualifier_ids.entries[nr_used++] = id;
		if (match_next == -1)
			continue;

		while (1) {
			id = syscalltbl__strglobmatch_next(EM_HOST, sc, &match_next);
			if (id < 0)
				break;
			if (nr_allocated == nr_used) {
				void *entries;

				nr_allocated += 8;
				entries = realloc(trace->ev_qualifier_ids.entries,
						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
				if (entries == NULL) {
					err = -ENOMEM;
					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
					goto out_free;
				}
				trace->ev_qualifier_ids.entries = entries;
			}
			trace->ev_qualifier_ids.entries[nr_used++] = id;
		}
	}

	trace->ev_qualifier_ids.nr = nr_used;
	qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
out:
	if (printed_invalid_prefix)
		pr_debug("\n");
	return err;
out_free:
	zfree(&trace->ev_qualifier_ids.entries);
	trace->ev_qualifier_ids.nr = 0;
	goto out;
}

static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
{
	bool in_ev_qualifier;

	if (trace->ev_qualifier_ids.nr == 0)
		return true;

	in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
				  trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;

	if (in_ev_qualifier)
		return !trace->not_ev_qualifier;

	return trace->not_ev_qualifier;
}

/*
 * args is to be interpreted as a series of longs but we need to handle
 * 8-byte unaligned accesses. args points to raw_data within the event
 * and raw_data is guaranteed to be 8-byte unaligned because it is
 * preceded by raw_size which is a u32. So we need to copy args to a temp
 * variable to read it. Most notably this avoids extended load instructions
 * on unaligned addresses.
 */
unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
{
	unsigned long val;
	unsigned char *p = arg->args + sizeof(unsigned long) * idx;

	memcpy(&val, p, sizeof(val));
	return val;
}
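/*
 * Illustrative sketch (added commentary): since raw_data is preceded by a
 * u32 size, it starts 4 bytes off an 8-byte boundary and so does every
 * u64 arg within it. Instead of the direct indexed load
 *
 *	val = ((unsigned long *)arg->args)[idx];	// misaligned access
 *
 * syscall_arg__val() above memcpy()s into a properly aligned local, which
 * compilers turn into safe loads on strict-alignment architectures.
 */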
static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
				      struct syscall_arg *arg)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
		return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);

	return scnprintf(bf, size, "arg%d: ", arg->idx);
}

/*
 * Check if the value is in fact zero, i.e. mask whatever needs masking,
 * such as the mount 'flags' argument, where some magic flag needs to be
 * ignored, see the comment in tools/perf/trace/beauty/mount_flags.c.
 */
static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val)
{
	if (fmt && fmt->mask_val)
		return fmt->mask_val(arg, val);

	return val;
}

static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size,
					     struct syscall_arg *arg, unsigned long val)
{
	if (fmt && fmt->scnprintf) {
		arg->val = val;
		if (fmt->parm)
			arg->parm = fmt->parm;
		return fmt->scnprintf(bf, size, arg);
	}
	return scnprintf(bf, size, "%ld", val);
}

static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned char *args, void *augmented_args, int augmented_args_size,
				      struct trace *trace, struct thread *thread)
{
	size_t printed = 0, btf_printed;
	unsigned long val;
	u8 bit = 1;
	struct syscall_arg arg = {
		.args	= args,
		.augmented = {
			.size = augmented_args_size,
			.args = augmented_args,
		},
		.idx	= 0,
		.mask	= 0,
		.trace  = trace,
		.thread = thread,
		.show_string_prefix = trace->show_string_prefix,
	};
	struct thread_trace *ttrace = thread__priv(thread);
	void *default_scnprintf;

	/*
	 * Things like fcntl will set this in its 'cmd' formatter to pick the
	 * right formatter for the return value (an fd? file flags?), which is
	 * not needed for syscalls that always return a given type, say an fd.
	 */
	ttrace->ret_scnprintf = NULL;

	if (sc->args != NULL) {
		struct tep_format_field *field;

		for (field = sc->args; field;
		     field = field->next, ++arg.idx, bit <<= 1) {
			if (arg.mask & bit)
				continue;

			arg.fmt = &sc->arg_fmt[arg.idx];
			val = syscall_arg__val(&arg, arg.idx);
			/*
			 * Some syscall args need some mask, most don't and
			 * return val untouched.
			 */
			val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val);

			/*
			 * Suppress this argument if its value is zero and the
			 * show_zero property isn't set.
			 *
			 * If it has a BTF type, then override the zero suppression knob
			 * as the common case is for zero in an enum to have an associated entry.
			 */
			if (val == 0 && !trace->show_zeros &&
			    !(sc->arg_fmt && sc->arg_fmt[arg.idx].show_zero) &&
			    !(sc->arg_fmt && sc->arg_fmt[arg.idx].strtoul == STUL_BTF_TYPE))
				continue;

			printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");

			if (trace->show_arg_names)
				printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);

			default_scnprintf = sc->arg_fmt[arg.idx].scnprintf;

			if (trace->force_btf || default_scnprintf == NULL || default_scnprintf == SCA_PTR) {
				btf_printed = trace__btf_scnprintf(trace, &arg, bf + printed,
								   size - printed, val, field->type);
				if (btf_printed) {
					printed += btf_printed;
					continue;
				}
			}

			printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx],
								  bf + printed, size - printed, &arg, val);
		}
	} else if (IS_ERR(sc->tp_format)) {
		/*
		 * If we managed to read the tracepoint /format file, then we
		 * may end up not having any args, like with gettid(), so only
		 * print the raw args when we didn't manage to read it.
		 */
		while (arg.idx < sc->nr_args) {
			if (arg.mask & bit)
				goto next_arg;
			val = syscall_arg__val(&arg, arg.idx);
			if (printed)
				printed += scnprintf(bf + printed, size - printed, ", ");
			printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
			printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val);
next_arg:
			++arg.idx;
			bit <<= 1;
		}
	}

	return printed;
}

static struct syscall *syscall__new(int e_machine, int id)
{
	struct syscall *sc = zalloc(sizeof(*sc));

	if (!sc)
		return NULL;

	sc->e_machine = e_machine;
	sc->id = id;
	return sc;
}

static void syscall__delete(struct syscall *sc)
{
	if (!sc)
		return;

	free(sc->arg_fmt);
	free(sc);
}

static int syscall__bsearch_cmp(const void *key, const void *entry)
{
	const struct syscall *a = key, *b = *((const struct syscall **)entry);

	if (a->e_machine != b->e_machine)
		return a->e_machine - b->e_machine;

	return a->id - b->id;
}

static int syscall__cmp(const void *va, const void *vb)
{
	const struct syscall *a = *((const struct syscall **)va);
	const struct syscall *b = *((const struct syscall **)vb);

	if (a->e_machine != b->e_machine)
		return a->e_machine - b->e_machine;

	return a->id - b->id;
}

static struct syscall *trace__find_syscall(struct trace *trace, int e_machine, int id)
{
	struct syscall key = {
		.e_machine = e_machine,
		.id = id,
	};
	struct syscall *sc, **tmp;

	if (trace->syscalls.table) {
		struct syscall **sc_entry = bsearch(&key, trace->syscalls.table,
						    trace->syscalls.table_size,
						    sizeof(trace->syscalls.table[0]),
						    syscall__bsearch_cmp);

		if (sc_entry)
			return *sc_entry;
	}

	sc = syscall__new(e_machine, id);
	if (!sc)
		return NULL;

	tmp = reallocarray(trace->syscalls.table, trace->syscalls.table_size + 1,
			   sizeof(trace->syscalls.table[0]));
	if (!tmp) {
		syscall__delete(sc);
		return NULL;
	}

	trace->syscalls.table = tmp;
	trace->syscalls.table[trace->syscalls.table_size++] = sc;
	qsort(trace->syscalls.table, trace->syscalls.table_size, sizeof(trace->syscalls.table[0]),
	      syscall__cmp);
	return sc;
}

typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
				  union perf_event *event,
				  struct perf_sample *sample);
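/*
 * Note (added commentary): trace__find_syscall() above keeps a single array
 * sorted by (e_machine, id): hits are resolved with bsearch(), and on a miss
 * a new entry is appended and the table qsort()ed again, which is cheap
 * since each distinct id is only ever inserted once.
 */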
static struct syscall *trace__syscall_info(struct trace *trace, struct evsel *evsel,
					   int e_machine, int id)
{
	struct syscall *sc;
	int err = 0;

	if (id < 0) {

		/*
		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
		 * before that, leaving at a higher verbosity level till that is
		 * explained. Reproduced with plain ftrace with:
		 *
		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
		 * grep "NR -1 " /t/trace_pipe
		 *
		 * After generating some load on the machine.
		 */
		if (verbose > 1) {
			static u64 n;
			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
				id, evsel__name(evsel), ++n);
		}
		return NULL;
	}

	err = -EINVAL;

	sc = trace__find_syscall(trace, e_machine, id);
	if (sc)
		err = syscall__read_info(sc, trace);

	if (err && verbose > 0) {
		char sbuf[STRERR_BUFSIZE];

		fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err,
			str_error_r(-err, sbuf, sizeof(sbuf)));
		if (sc && sc->name)
			fprintf(trace->output, "(%s)", sc->name);
		fputs(" information\n", trace->output);
	}
	return err ? NULL : sc;
}

struct syscall_stats {
	struct stats stats;
	u64	     nr_failures;
	int	     max_errno;
	u32	     *errnos;
};

static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace,
				 int id, struct perf_sample *sample, long err,
				 struct trace *trace)
{
	struct hashmap *syscall_stats = ttrace->syscall_stats;
	struct syscall_stats *stats = NULL;
	u64 duration = 0;

	if (trace->summary_bpf)
		return;

	if (trace->summary_mode == SUMMARY__BY_TOTAL)
		syscall_stats = trace->syscall_stats;

	if (!hashmap__find(syscall_stats, id, &stats)) {
		stats = zalloc(sizeof(*stats));
		if (stats == NULL)
			return;

		init_stats(&stats->stats);
		if (hashmap__add(syscall_stats, id, stats) < 0) {
			free(stats);
			return;
		}
	}

	if (ttrace->entry_time && sample->time > ttrace->entry_time)
		duration = sample->time - ttrace->entry_time;

	update_stats(&stats->stats, duration);

	if (err < 0) {
		++stats->nr_failures;

		if (!trace->errno_summary)
			return;

		err = -err;
		if (err > stats->max_errno) {
			u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32));

			if (new_errnos) {
				memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32));
			} else {
				pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n",
					 thread__comm_str(thread), thread__pid(thread),
					 thread__tid(thread));
				return;
			}

			stats->errnos = new_errnos;
			stats->max_errno = err;
		}

		++stats->errnos[err - 1];
	}
}

static int trace__printf_interrupted_entry(struct trace *trace)
{
	struct thread_trace *ttrace;
	size_t printed;
	int len;

	if (trace->failure_only || trace->current == NULL)
		return 0;

	ttrace = thread__priv(trace->current);

	if (!ttrace->entry_pending)
		return 0;

	printed  = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
	printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);

	if (len < trace->args_alignment - 4)
		printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");

	printed += fprintf(trace->output, " ...\n");

	ttrace->entry_pending = false;
	++trace->nr_events_printed;

	return printed;
}
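/*
 * Illustrative example (added commentary): when a new event arrives while
 * another thread's sys_enter is still pending, the code above flushes the
 * pending line with a trailing " ..." marker, e.g.:
 *
 *	nanosleep(rqtp: 0x7ffd...)                               ...
 *
 * and the matching sys_exit later prints a "continued" line for it.
 */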
static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
				 struct perf_sample *sample, struct thread *thread)
{
	int printed = 0;

	if (trace->print_sample) {
		double ts = (double)sample->time / NSEC_PER_MSEC;

		printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
				   evsel__name(evsel), ts,
				   thread__comm_str(thread),
				   sample->pid, sample->tid, sample->cpu);
	}

	return printed;
}

static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
{
	/*
	 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
	 * and there we get all 6 syscall args plus the tracepoint common fields
	 * that get calculated at the start and the syscall_nr (another long).
	 * So we check if that is the case and if so don't look after
	 * sc->args_size but always after the full raw_syscalls:sys_enter
	 * payload, which is fixed.
	 *
	 * We'll revisit this later to pass sc->args_size to the BPF augmenter
	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
	 * copies only what we need for each syscall, like what happens when we
	 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
	 * traffic to just what is needed for each syscall.
	 */
	int args_size = raw_augmented_args_size ?: sc->args_size;

	*augmented_args_size = sample->raw_size - args_size;
	if (*augmented_args_size > 0) {
		static uintptr_t argbuf[1024]; /* assuming single-threaded */

		if ((size_t)(*augmented_args_size) > sizeof(argbuf))
			return NULL;

		/*
		 * The perf ring-buffer is 8-byte aligned but sample->raw_data
		 * is not because it's preceded by a u32 size. Later, the
		 * beautifiers will use the augmented args with stricter
		 * alignments, like in some struct. To make sure it's aligned,
		 * let's copy the args into a static buffer as it's
		 * single-threaded for now.
		 */
		memcpy(argbuf, sample->raw_data + args_size, *augmented_args_size);

		return argbuf;
	}
	return NULL;
}
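/*
 * Illustrative layout (added commentary, not a definitive ABI description):
 * the augmented payload appended by the BPF collector is, roughly,
 *
 *	[ raw sys_enter args (args_size bytes) ]
 *	[ struct augmented_arg header + ->size bytes of value ] ...
 *
 * which is why the string/buf beautifiers seen earlier advance
 * arg->augmented.args by sizeof(*augmented_arg) + augmented_arg->size.
 */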
static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	int printed = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	int augmented_args_size = 0, e_machine;
	void *augmented_args = NULL;
	struct syscall *sc;
	struct thread_trace *ttrace;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	e_machine = thread__e_machine(thread, trace->host);
	sc = trace__syscall_info(trace, evsel, e_machine, id);
	if (sc == NULL)
		goto out_put;
	ttrace = thread__trace(thread, trace);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)
			goto out_put;
	}

	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
		trace__printf_interrupted_entry(trace);
	/*
	 * If this is raw_syscalls.sys_enter, then it always comes with the 6
	 * possible arguments, even if the syscall being handled, say "openat",
	 * uses only 4. This breaks the syscall__augmented_args() check for
	 * augmented args, as we calculate syscall->args_size using each
	 * syscalls:sys_enter_NAME tracefs format file, so when handling, say,
	 * the openat syscall, we end up getting 6 args for the
	 * raw_syscalls:sys_enter event when we expected just 4, mistakenly
	 * thinking that the extra 2 u64 args are the augmented filename. So
	 * just check here and avoid using augmented syscalls when the evsel is
	 * the raw_syscalls one.
	 */
	if (evsel != trace->syscalls.events.sys_enter)
		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, augmented_args, augmented_args_size, trace, thread);

	if (sc->is_exit) {
		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
			int alignment = 0;

			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
			printed = fprintf(trace->output, "%s)", ttrace->entry_str);
			if (trace->args_alignment > printed)
				alignment = trace->args_alignment - printed;
			fprintf(trace->output, "%*s= ?\n", alignment, " ");
		}
	} else {
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;
	}

	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);
	}
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample)
{
	struct thread_trace *ttrace;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc;
	char msg[1024];
	void *args, *augmented_args = NULL;
	int augmented_args_size, e_machine;
	size_t printed = 0;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	e_machine = thread__e_machine(thread, trace->host);
	sc = trace__syscall_info(trace, evsel, e_machine, id);
	if (sc == NULL)
		goto out_put;
	ttrace = thread__trace(thread, trace);
	/*
	 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
	 * and the rest of the beautifiers accessing it via struct syscall_arg touches it.
	 */
	if (ttrace == NULL)
		goto out_put;

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
	augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	printed += syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
	fprintf(trace->output, "%.*s", (int)printed, msg);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
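/*
 * Illustrative output (added commentary): a fully beautified enter/exit pair
 * ends up rendered as something like
 *
 *	0.123 ( 0.004 ms): cat/1234 openat(dfd: CWD, filename: "/etc/hosts", flags: RDONLY) = 3
 *
 * with the timestamp, duration and comm/tid parts controlled by the
 * show_tstamp/show_duration/show_comm knobs seen earlier.
 */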
static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample,
				    struct callchain_cursor *cursor)
{
	struct addr_location al;
	int max_stack = evsel->core.attr.sample_max_stack ?
			evsel->core.attr.sample_max_stack :
			trace->max_stack;
	int err = -1;

	addr_location__init(&al);
	if (machine__resolve(trace->host, &al, sample) < 0)
		goto out;

	err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
out:
	addr_location__exit(&al);
	return err;
}

static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
{
	/* TODO: user-configurable print_opts */
	const unsigned int print_opts = EVSEL__PRINT_SYM |
					EVSEL__PRINT_DSO |
					EVSEL__PRINT_UNKNOWN_AS_ADDR;

	return sample__fprintf_callchain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output);
}

static const char *errno_to_name(struct evsel *evsel, int err)
{
	struct perf_env *env = evsel__env(evsel);

	return perf_env__arch_strerrno(env, err);
}
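/*
 * Note (added commentary): errno_to_name() resolves the errno through the
 * perf_env of the session's architecture rather than the local <errno.h>,
 * so a trace recorded on another arch can still print the right constant
 * names on exit lines (e.g. "-1 ENOENT (No such file or directory)").
 */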
["); 2967 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued"); 2968 printed += 9; 2969 printed += fprintf(trace->output, "]: %s()", sc->name); 2970 } 2971 2972 printed++; /* the closing ')' */ 2973 2974 if (alignment > printed) 2975 alignment -= printed; 2976 else 2977 alignment = 0; 2978 2979 fprintf(trace->output, ")%*s= ", alignment, " "); 2980 2981 if (sc->fmt == NULL) { 2982 if (ret < 0) 2983 goto errno_print; 2984 signed_print: 2985 fprintf(trace->output, "%ld", ret); 2986 } else if (ret < 0) { 2987 errno_print: { 2988 char bf[STRERR_BUFSIZE]; 2989 const char *emsg = str_error_r(-ret, bf, sizeof(bf)), 2990 *e = errno_to_name(evsel, -ret); 2991 2992 fprintf(trace->output, "-1 %s (%s)", e, emsg); 2993 } 2994 } else if (ret == 0 && sc->fmt->timeout) 2995 fprintf(trace->output, "0 (Timeout)"); 2996 else if (ttrace->ret_scnprintf) { 2997 char bf[1024]; 2998 struct syscall_arg arg = { 2999 .val = ret, 3000 .thread = thread, 3001 .trace = trace, 3002 }; 3003 ttrace->ret_scnprintf(bf, sizeof(bf), &arg); 3004 ttrace->ret_scnprintf = NULL; 3005 fprintf(trace->output, "%s", bf); 3006 } else if (sc->fmt->hexret) 3007 fprintf(trace->output, "%#lx", ret); 3008 else if (sc->fmt->errpid) { 3009 struct thread *child = machine__find_thread(trace->host, ret, ret); 3010 3011 fprintf(trace->output, "%ld", ret); 3012 if (child != NULL) { 3013 if (thread__comm_set(child)) 3014 fprintf(trace->output, " (%s)", thread__comm_str(child)); 3015 thread__put(child); 3016 } 3017 } else 3018 goto signed_print; 3019 3020 fputc('\n', trace->output); 3021 3022 /* 3023 * We only consider an 'event' for the sake of --max-events a non-filtered 3024 * sys_enter + sys_exit and other tracepoint events. 3025 */ 3026 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX) 3027 interrupted = true; 3028 3029 if (callchain_ret > 0) 3030 trace__fprintf_callchain(trace, sample); 3031 else if (callchain_ret < 0) 3032 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 3033 out: 3034 ttrace->entry_pending = false; 3035 err = 0; 3036 out_put: 3037 thread__put(thread); 3038 return err; 3039 } 3040 3041 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel, 3042 union perf_event *event __maybe_unused, 3043 struct perf_sample *sample) 3044 { 3045 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 3046 struct thread_trace *ttrace; 3047 size_t filename_len, entry_str_len, to_move; 3048 ssize_t remaining_space; 3049 char *pos; 3050 const char *filename = evsel__rawptr(evsel, sample, "pathname"); 3051 3052 if (!thread) 3053 goto out; 3054 3055 ttrace = thread__priv(thread); 3056 if (!ttrace) 3057 goto out_put; 3058 3059 filename_len = strlen(filename); 3060 if (filename_len == 0) 3061 goto out_put; 3062 3063 if (ttrace->filename.namelen < filename_len) { 3064 char *f = realloc(ttrace->filename.name, filename_len + 1); 3065 3066 if (f == NULL) 3067 goto out_put; 3068 3069 ttrace->filename.namelen = filename_len; 3070 ttrace->filename.name = f; 3071 } 3072 3073 strcpy(ttrace->filename.name, filename); 3074 ttrace->filename.pending_open = true; 3075 3076 if (!ttrace->filename.ptr) 3077 goto out_put; 3078 3079 entry_str_len = strlen(ttrace->entry_str); 3080 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */ 3081 if (remaining_space <= 0) 3082 goto out_put; 3083 3084 if (filename_len > (size_t)remaining_space) { 3085 filename += filename_len - remaining_space; 3086 filename_len = remaining_space; 3087 } 3088 
	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
	memmove(pos + filename_len, pos, to_move);
	memcpy(pos, filename, filename_len);

	ttrace->filename.ptr = 0;
	ttrace->filename.entry_str_pos = 0;
out_put:
	thread__put(thread);
out:
	return 0;
}

static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample)
{
	u64 runtime = evsel__intval(evsel, sample, "runtime");
	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
	struct thread *thread = machine__findnew_thread(trace->host,
							sample->pid,
							sample->tid);
	struct thread_trace *ttrace = thread__trace(thread, trace);

	if (ttrace == NULL)
		goto out_dump;

	ttrace->runtime_ms += runtime_ms;
	trace->runtime_ms += runtime_ms;
out_put:
	thread__put(thread);
	return 0;

out_dump:
	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
		evsel->name,
		evsel__strval(evsel, sample, "comm"),
		(pid_t)evsel__intval(evsel, sample, "pid"),
		runtime,
		evsel__intval(evsel, sample, "vruntime"));
	goto out_put;
}

static int bpf_output__printer(enum binary_printer_ops op,
			       unsigned int val, void *extra __maybe_unused, FILE *fp)
{
	unsigned char ch = (unsigned char)val;

	switch (op) {
	case BINARY_PRINT_CHAR_DATA:
		return fprintf(fp, "%c", isprint(ch) ? ch : '.');
	case BINARY_PRINT_DATA_BEGIN:
	case BINARY_PRINT_LINE_BEGIN:
	case BINARY_PRINT_ADDR:
	case BINARY_PRINT_NUM_DATA:
	case BINARY_PRINT_NUM_PAD:
	case BINARY_PRINT_SEP:
	case BINARY_PRINT_CHAR_PAD:
	case BINARY_PRINT_LINE_END:
	case BINARY_PRINT_DATA_END:
	default:
		break;
	}

	return 0;
}

static void bpf_output__fprintf(struct trace *trace,
				struct perf_sample *sample)
{
	binary__fprintf(sample->raw_data, sample->raw_size, 8,
			bpf_output__printer, NULL, trace->output);
	++trace->nr_events_printed;
}
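/*
 * Note (added commentary): bpf_output__printer() only acts on
 * BINARY_PRINT_CHAR_DATA, printing isprint() bytes verbatim and '.' for the
 * rest, so a raw bpf-output payload shows up as the character column of a
 * hexdump-style rendering driven by binary__fprintf().
 */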
static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
				       struct thread *thread, void *augmented_args, int augmented_args_size)
{
	char bf[2048];
	size_t size = sizeof(bf);
	const struct tep_event *tp_format = evsel__tp_format(evsel);
	struct tep_format_field *field = tp_format ? tp_format->format.fields : NULL;
	struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel);
	size_t printed = 0, btf_printed;
	unsigned long val;
	u8 bit = 1;
	struct syscall_arg syscall_arg = {
		.augmented = {
			.size = augmented_args_size,
			.args = augmented_args,
		},
		.idx	= 0,
		.mask	= 0,
		.trace  = trace,
		.thread = thread,
		.show_string_prefix = trace->show_string_prefix,
	};

	for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) {
		if (syscall_arg.mask & bit)
			continue;

		syscall_arg.len = 0;
		syscall_arg.fmt = arg;
		if (field->flags & TEP_FIELD_IS_ARRAY) {
			int offset = field->offset;

			if (field->flags & TEP_FIELD_IS_DYNAMIC) {
				offset = format_field__intval(field, sample, evsel->needs_swap);
				syscall_arg.len = offset >> 16;
				offset &= 0xffff;
				if (tep_field_is_relative(field->flags))
					offset += field->offset + field->size;
			}

			val = (uintptr_t)(sample->raw_data + offset);
		} else
			val = format_field__intval(field, sample, evsel->needs_swap);
		/*
		 * Some syscall args need some mask, most don't and
		 * return val untouched.
		 */
		val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val);

		/* Suppress this argument if its value is zero and show_zero property isn't set. */
		if (val == 0 && !trace->show_zeros && !arg->show_zero && arg->strtoul != STUL_BTF_TYPE)
			continue;

		printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");

		if (trace->show_arg_names)
			printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);

		btf_printed = trace__btf_scnprintf(trace, &syscall_arg, bf + printed, size - printed, val, field->type);
		if (btf_printed) {
			printed += btf_printed;
			continue;
		}

		printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
	}

	return fprintf(trace->output, "%.*s", (int)printed, bf);
}
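/*
 * Illustrative note (added commentary): for TEP_FIELD_IS_DYNAMIC array
 * fields, the 32-bit value read from the sample packs the payload location
 * as (len << 16) | offset, which the loop above unpacks before turning the
 * offset into a pointer inside sample->raw_data.
 */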
static int trace__event_handler(struct trace *trace, struct evsel *evsel,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample)
{
	struct thread *thread;
	int callchain_ret = 0;

	if (evsel->nr_events_printed >= evsel->max_events)
		return 0;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);

	if (sample->callchain) {
		struct callchain_cursor *cursor = get_tls_callchain_cursor();

		callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
		if (callchain_ret == 0) {
			if (cursor->nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	trace__printf_interrupted_entry(trace);
	trace__fprintf_tstamp(trace, sample->time, trace->output);

	if (trace->trace_syscalls && trace->show_duration)
		fprintf(trace->output, "(         ): ");

	if (thread)
		trace__fprintf_comm_tid(trace, thread, trace->output);

	if (evsel == trace->syscalls.events.bpf_output) {
		int id = perf_evsel__sc_tp_uint(evsel, id, sample);
		int e_machine = thread ? thread__e_machine(thread, trace->host) : EM_HOST;
		struct syscall *sc = trace__syscall_info(trace, evsel, e_machine, id);

		if (sc) {
			fprintf(trace->output, "%s(", sc->name);
			trace__fprintf_sys_enter(trace, evsel, sample);
			fputc(')', trace->output);
			goto newline;
		}

		/*
		 * XXX: Not having the associated syscall info or not finding/adding
		 * the thread should never happen, but if it does...
		 * fall thru and print it as a bpf_output event.
		 */
	}

	fprintf(trace->output, "%s(", evsel->name);

	if (evsel__is_bpf_output(evsel)) {
		bpf_output__fprintf(trace, sample);
	} else {
		const struct tep_event *tp_format = evsel__tp_format(evsel);

		if (tp_format && (strncmp(tp_format->name, "sys_enter_", 10) ||
				  trace__fprintf_sys_enter(trace, evsel, sample))) {
			if (trace->libtraceevent_print) {
				event_format__fprintf(tp_format, sample->cpu,
						      sample->raw_data, sample->raw_size,
						      trace->output);
			} else {
				trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
			}
		}
	}

newline:
	fprintf(trace->output, ")\n");

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));

	++trace->nr_events_printed;

	if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
		evsel__disable(evsel);
		evsel__close(evsel);
	}
out:
	thread__put(thread);
	return 0;
}

static void print_location(FILE *f, struct perf_sample *sample,
			   struct addr_location *al,
			   bool print_dso, bool print_sym)
{

	if ((verbose > 0 || print_dso) && al->map)
		fprintf(f, "%s@", dso__long_name(map__dso(al->map)));

	if ((verbose > 0 || print_sym) && al->sym)
		fprintf(f, "%s+0x%" PRIx64, al->sym->name,
			al->addr - al->sym->start);
	else if (al->map)
		fprintf(f, "0x%" PRIx64, al->addr);
	else
		fprintf(f, "0x%" PRIx64, sample->addr);
}
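/*
 * Illustrative output (added commentary): with print_location() feeding
 * both the faulting IP and the target address, a page fault line looks
 * roughly like
 *
 *	1234.567 cat/1234 majfault [__memcpy+0x42] => /lib/libc.so.6@0x1234 (d.)
 *
 * where the final (%c%c) pair shows the map type ('d'ata, e'x'ecutable or
 * '?') and the map level.
 */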
3387 "maj" : "min"); 3388 3389 print_location(trace->output, sample, &al, false, true); 3390 3391 fprintf(trace->output, "] => "); 3392 3393 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); 3394 3395 if (!al.map) { 3396 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); 3397 3398 if (al.map) 3399 map_type = 'x'; 3400 else 3401 map_type = '?'; 3402 } 3403 3404 print_location(trace->output, sample, &al, true, false); 3405 3406 fprintf(trace->output, " (%c%c)\n", map_type, al.level); 3407 3408 if (callchain_ret > 0) 3409 trace__fprintf_callchain(trace, sample); 3410 else if (callchain_ret < 0) 3411 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 3412 3413 ++trace->nr_events_printed; 3414 out: 3415 err = 0; 3416 out_put: 3417 thread__put(thread); 3418 addr_location__exit(&al); 3419 return err; 3420 } 3421 3422 static void trace__set_base_time(struct trace *trace, 3423 struct evsel *evsel, 3424 struct perf_sample *sample) 3425 { 3426 /* 3427 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust 3428 * and don't use sample->time unconditionally, we may end up having 3429 * some other event in the future without PERF_SAMPLE_TIME for good 3430 * reason, i.e. we may not be interested in its timestamps, just in 3431 * it taking place, picking some piece of information when it 3432 * appears in our event stream (vfs_getname comes to mind). 3433 */ 3434 if (trace->base_time == 0 && !trace->full_time && 3435 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME)) 3436 trace->base_time = sample->time; 3437 } 3438 3439 static int trace__process_sample(const struct perf_tool *tool, 3440 union perf_event *event, 3441 struct perf_sample *sample, 3442 struct evsel *evsel, 3443 struct machine *machine __maybe_unused) 3444 { 3445 struct trace *trace = container_of(tool, struct trace, tool); 3446 struct thread *thread; 3447 int err = 0; 3448 3449 tracepoint_handler handler = evsel->handler; 3450 3451 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 3452 if (thread && thread__is_filtered(thread)) 3453 goto out; 3454 3455 trace__set_base_time(trace, evsel, sample); 3456 3457 if (handler) { 3458 ++trace->nr_events; 3459 handler(trace, evsel, event, sample); 3460 } 3461 out: 3462 thread__put(thread); 3463 return err; 3464 } 3465 3466 static int trace__record(struct trace *trace, int argc, const char **argv) 3467 { 3468 unsigned int rec_argc, i, j; 3469 const char **rec_argv; 3470 const char * const record_args[] = { 3471 "record", 3472 "-R", 3473 "-m", "1024", 3474 "-c", "1", 3475 }; 3476 pid_t pid = getpid(); 3477 char *filter = asprintf__tp_filter_pids(1, &pid); 3478 const char * const sc_args[] = { "-e", }; 3479 unsigned int sc_args_nr = ARRAY_SIZE(sc_args); 3480 const char * const majpf_args[] = { "-e", "major-faults" }; 3481 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args); 3482 const char * const minpf_args[] = { "-e", "minor-faults" }; 3483 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args); 3484 int err = -1; 3485 3486 /* +3 is for the event string below and the pid filter */ 3487 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 + 3488 majpf_args_nr + minpf_args_nr + argc; 3489 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 3490 3491 if (rec_argv == NULL || filter == NULL) 3492 goto out_free; 3493 3494 j = 0; 3495 for (i = 0; i < ARRAY_SIZE(record_args); i++) 3496 rec_argv[j++] = record_args[i]; 3497 3498 if (trace->trace_syscalls) { 3499 for (i = 0; i < sc_args_nr; i++) 3500 rec_argv[j++] = sc_args[i]; 3501 
static int trace__record(struct trace *trace, int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-R",
		"-m", "1024",
		"-c", "1",
	};
	pid_t pid = getpid();
	char *filter = asprintf__tp_filter_pids(1, &pid);
	const char * const sc_args[] = { "-e", };
	unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
	const char * const majpf_args[] = { "-e", "major-faults" };
	unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
	const char * const minpf_args[] = { "-e", "minor-faults" };
	unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
	int err = -1;

	/* +3 is for the event string below and the pid filter */
	rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 +
		   majpf_args_nr + minpf_args_nr + argc;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL || filter == NULL)
		goto out_free;

	j = 0;
	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[j++] = record_args[i];

	if (trace->trace_syscalls) {
		for (i = 0; i < sc_args_nr; i++)
			rec_argv[j++] = sc_args[i];

		/* event string may be different for older kernels - e.g., RHEL6 */
		if (is_valid_tracepoint("raw_syscalls:sys_enter"))
			rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
		else if (is_valid_tracepoint("syscalls:sys_enter"))
			rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
		else {
			pr_err("Neither raw_syscalls nor syscalls events exist.\n");
			goto out_free;
		}
	}

	rec_argv[j++] = "--filter";
	rec_argv[j++] = filter;

	if (trace->trace_pgfaults & TRACE_PFMAJ)
		for (i = 0; i < majpf_args_nr; i++)
			rec_argv[j++] = majpf_args[i];

	if (trace->trace_pgfaults & TRACE_PFMIN)
		for (i = 0; i < minpf_args_nr; i++)
			rec_argv[j++] = minpf_args[i];

	for (i = 0; i < (unsigned int)argc; i++)
		rec_argv[j++] = argv[i];

	err = cmd_record(j, rec_argv);
out_free:
	free(filter);
	free(rec_argv);
	return err;
}

static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
static size_t trace__fprintf_total_summary(struct trace *trace, FILE *fp);

static bool evlist__add_vfs_getname(struct evlist *evlist)
{
	bool found = false;
	struct evsel *evsel, *tmp;
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = parse_events(evlist, "probe:vfs_getname*", &err);
	parse_events_error__exit(&err);
	if (ret)
		return false;

	evlist__for_each_entry_safe(evlist, evsel, tmp) {
		if (!strstarts(evsel__name(evsel), "probe:vfs_getname"))
			continue;

		if (evsel__field(evsel, "pathname")) {
			evsel->handler = trace__vfs_getname;
			found = true;
			continue;
		}

		list_del_init(&evsel->core.node);
		evsel->evlist = NULL;
		evsel__delete(evsel);
	}

	return found;
}

static struct evsel *evsel__new_pgfault(u64 config)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.mmap_data = 1,
	};

	attr.config = config;
	attr.sample_period = 1;

	event_attr_init(&attr);

	evsel = evsel__new(&attr);
	if (evsel)
		evsel->handler = trace__pgfault;

	return evsel;
}

static void evlist__free_syscall_tp_fields(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel_trace__delete(evsel->priv);
		evsel->priv = NULL;
	}
}
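/*
 * Note (added commentary): evlist__add_vfs_getname() above only works when a
 * 'probe:vfs_getname' probe exposing a "pathname" string field has been set
 * up beforehand with perf probe, e.g. something along the lines of
 *
 *	perf probe 'vfs_getname=getname_flags:72 pathname=result->name:string'
 *
 * (the exact probe point varies by kernel version); otherwise it returns
 * false and filename beautification falls back to other means.
 */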
	if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
		interrupted = true;
}

static int trace__add_syscall_newtp(struct trace *trace)
{
	int ret = -1;
	struct evlist *evlist = trace->evlist;
	struct evsel *sys_enter, *sys_exit;

	sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
	if (sys_enter == NULL)
		goto out;

	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
		goto out_delete_sys_enter;

	sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
	if (sys_exit == NULL)
		goto out_delete_sys_enter;

	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
		goto out_delete_sys_exit;

	evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
	evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);

	evlist__add(evlist, sys_enter);
	evlist__add(evlist, sys_exit);

	if (callchain_param.enabled && !trace->kernel_syscallchains) {
		/*
		 * We're interested only in the user space callchain leading
		 * to the syscall, allow overriding that for debugging reasons
		 * using --kernel-syscall-graph.
		 */
		sys_exit->core.attr.exclude_callchain_kernel = 1;
	}

	trace->syscalls.events.sys_enter = sys_enter;
	trace->syscalls.events.sys_exit = sys_exit;

	ret = 0;
out:
	return ret;

out_delete_sys_exit:
	evsel__delete_priv(sys_exit);
out_delete_sys_enter:
	evsel__delete_priv(sys_enter);
	goto out;
}

static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
{
	int err = -1;
	struct evsel *sys_exit;
	char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
						trace->ev_qualifier_ids.nr,
						trace->ev_qualifier_ids.entries);

	if (filter == NULL)
		goto out_enomem;

	if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
		sys_exit = trace->syscalls.events.sys_exit;
		err = evsel__append_tp_filter(sys_exit, filter);
	}

	free(filter);
out:
	return err;
out_enomem:
	errno = ENOMEM;
	goto out;
}

#ifdef HAVE_BPF_SKEL
static int syscall_arg_fmt__cache_btf_struct(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type)
{
	int id;

	if (arg_fmt->type != NULL)
		return -1;

	id = btf__find_by_name(btf, type);
	if (id < 0)
		return -1;

	arg_fmt->type = btf__type_by_id(btf, id);
	arg_fmt->type_id = id;

	return 0;
}

static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
{
	struct bpf_program *pos, *prog = NULL;
	const char *sec_name;

	if (trace->skel->obj == NULL)
		return NULL;

	bpf_object__for_each_program(pos, trace->skel->obj) {
		sec_name = bpf_program__section_name(pos);
		if (sec_name && !strcmp(sec_name, name)) {
			prog = pos;
			break;
		}
	}

	return prog;
}

static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
							const char *prog_name, const char *type)
{
	struct bpf_program *prog;

	if (prog_name == NULL) {
		char default_prog_name[256];
		scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name);
		prog = trace__find_bpf_program_by_title(trace,
default_prog_name); 3752 if (prog != NULL) 3753 goto out_found; 3754 if (sc->fmt && sc->fmt->alias) { 3755 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias); 3756 prog = trace__find_bpf_program_by_title(trace, default_prog_name); 3757 if (prog != NULL) 3758 goto out_found; 3759 } 3760 goto out_unaugmented; 3761 } 3762 3763 prog = trace__find_bpf_program_by_title(trace, prog_name); 3764 3765 if (prog != NULL) { 3766 out_found: 3767 return prog; 3768 } 3769 3770 pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n", 3771 prog_name, type, sc->name); 3772 out_unaugmented: 3773 return trace->skel->progs.syscall_unaugmented; 3774 } 3775 3776 static void trace__init_syscall_bpf_progs(struct trace *trace, int e_machine, int id) 3777 { 3778 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id); 3779 3780 if (sc == NULL) 3781 return; 3782 3783 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter"); 3784 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit"); 3785 } 3786 3787 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int e_machine, int id) 3788 { 3789 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id); 3790 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented); 3791 } 3792 3793 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int e_machine, int id) 3794 { 3795 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id); 3796 return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented); 3797 } 3798 3799 static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int e_machine, int key, unsigned int *beauty_array) 3800 { 3801 struct tep_format_field *field; 3802 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, key); 3803 const struct btf_type *bt; 3804 char *struct_offset, *tmp, name[32]; 3805 bool can_augment = false; 3806 int i, cnt; 3807 3808 if (sc == NULL) 3809 return -1; 3810 3811 trace__load_vmlinux_btf(trace); 3812 if (trace->btf == NULL) 3813 return -1; 3814 3815 for (i = 0, field = sc->args; field; ++i, field = field->next) { 3816 // XXX We're only collecting pointer payloads _from_ user space 3817 if (!sc->arg_fmt[i].from_user) 3818 continue; 3819 3820 struct_offset = strstr(field->type, "struct "); 3821 if (struct_offset == NULL) 3822 struct_offset = strstr(field->type, "union "); 3823 else 3824 struct_offset++; // "union" is shorter 3825 3826 if (field->flags & TEP_FIELD_IS_POINTER && struct_offset) { /* struct or union (think BPF's attr arg) */ 3827 struct_offset += 6; 3828 3829 /* for 'struct foo *', we only want 'foo' */ 3830 for (tmp = struct_offset, cnt = 0; *tmp != ' ' && *tmp != '\0'; ++tmp, ++cnt) { 3831 } 3832 3833 strncpy(name, struct_offset, cnt); 3834 name[cnt] = '\0'; 3835 3836 /* cache struct's btf_type and type_id */ 3837 if (syscall_arg_fmt__cache_btf_struct(&sc->arg_fmt[i], trace->btf, name)) 3838 continue; 3839 3840 bt = sc->arg_fmt[i].type; 3841 beauty_array[i] = bt->size; 3842 can_augment = true; 3843 } else if (field->flags & TEP_FIELD_IS_POINTER && /* string */ 3844 strcmp(field->type, "const char *") == 0 && 3845 (strstr(field->name, "name") || 3846 strstr(field->name, "path") || 3847 strstr(field->name, "file") || 3848 strstr(field->name, 
"root") || 3849 strstr(field->name, "key") || 3850 strstr(field->name, "special") || 3851 strstr(field->name, "type") || 3852 strstr(field->name, "description"))) { 3853 beauty_array[i] = 1; 3854 can_augment = true; 3855 } else if (field->flags & TEP_FIELD_IS_POINTER && /* buffer */ 3856 strstr(field->type, "char *") && 3857 (strstr(field->name, "buf") || 3858 strstr(field->name, "val") || 3859 strstr(field->name, "msg"))) { 3860 int j; 3861 struct tep_format_field *field_tmp; 3862 3863 /* find the size of the buffer that appears in pairs with buf */ 3864 for (j = 0, field_tmp = sc->args; field_tmp; ++j, field_tmp = field_tmp->next) { 3865 if (!(field_tmp->flags & TEP_FIELD_IS_POINTER) && /* only integers */ 3866 (strstr(field_tmp->name, "count") || 3867 strstr(field_tmp->name, "siz") || /* size, bufsiz */ 3868 (strstr(field_tmp->name, "len") && strcmp(field_tmp->name, "filename")))) { 3869 /* filename's got 'len' in it, we don't want that */ 3870 beauty_array[i] = -(j + 1); 3871 can_augment = true; 3872 break; 3873 } 3874 } 3875 } 3876 } 3877 3878 if (can_augment) 3879 return 0; 3880 3881 return -1; 3882 } 3883 3884 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, 3885 struct syscall *sc) 3886 { 3887 struct tep_format_field *field, *candidate_field; 3888 /* 3889 * We're only interested in syscalls that have a pointer: 3890 */ 3891 for (field = sc->args; field; field = field->next) { 3892 if (field->flags & TEP_FIELD_IS_POINTER) 3893 goto try_to_find_pair; 3894 } 3895 3896 return NULL; 3897 3898 try_to_find_pair: 3899 for (int i = 0, num_idx = syscalltbl__num_idx(sc->e_machine); i < num_idx; ++i) { 3900 int id = syscalltbl__id_at_idx(sc->e_machine, i); 3901 struct syscall *pair = trace__syscall_info(trace, NULL, sc->e_machine, id); 3902 struct bpf_program *pair_prog; 3903 bool is_candidate = false; 3904 3905 if (pair == NULL || pair->id == sc->id || 3906 pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented) 3907 continue; 3908 3909 for (field = sc->args, candidate_field = pair->args; 3910 field && candidate_field; field = field->next, candidate_field = candidate_field->next) { 3911 bool is_pointer = field->flags & TEP_FIELD_IS_POINTER, 3912 candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER; 3913 3914 if (is_pointer) { 3915 if (!candidate_is_pointer) { 3916 // The candidate just doesn't copies our pointer arg, might copy other pointers we want. 3917 continue; 3918 } 3919 } else { 3920 if (candidate_is_pointer) { 3921 // The candidate might copy a pointer we don't have, skip it. 3922 goto next_candidate; 3923 } 3924 continue; 3925 } 3926 3927 if (strcmp(field->type, candidate_field->type)) 3928 goto next_candidate; 3929 3930 /* 3931 * This is limited in the BPF program but sys_write 3932 * uses "const char *" for its "buf" arg so we need to 3933 * use some heuristic that is kinda future proof... 3934 */ 3935 if (strcmp(field->type, "const char *") == 0 && 3936 !(strstr(field->name, "name") || 3937 strstr(field->name, "path") || 3938 strstr(field->name, "file") || 3939 strstr(field->name, "root") || 3940 strstr(field->name, "description"))) 3941 goto next_candidate; 3942 3943 is_candidate = true; 3944 } 3945 3946 if (!is_candidate) 3947 goto next_candidate; 3948 3949 /* 3950 * Check if the tentative pair syscall augmenter has more pointers, if it has, 3951 * then it may be collecting that and we then can't use it, as it would collect 3952 * more than what is common to the two syscalls. 
3953 */ 3954 if (candidate_field) { 3955 for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next) 3956 if (candidate_field->flags & TEP_FIELD_IS_POINTER) 3957 goto next_candidate; 3958 } 3959 3960 pair_prog = pair->bpf_prog.sys_enter; 3961 /* 3962 * If the pair isn't enabled, then its bpf_prog.sys_enter will not 3963 * have been searched for, so search it here and if it returns the 3964 * unaugmented one, then ignore it, otherwise we'll reuse that BPF 3965 * program for a filtered syscall on a non-filtered one. 3966 * 3967 * For instance, we have "!syscalls:sys_enter_renameat" and that is 3968 * useful for "renameat2". 3969 */ 3970 if (pair_prog == NULL) { 3971 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter"); 3972 if (pair_prog == trace->skel->progs.syscall_unaugmented) 3973 goto next_candidate; 3974 } 3975 3976 pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, 3977 sc->name); 3978 return pair_prog; 3979 next_candidate: 3980 continue; 3981 } 3982 3983 return NULL; 3984 } 3985 3986 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace, int e_machine) 3987 { 3988 int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter); 3989 int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit); 3990 int beauty_map_fd = bpf_map__fd(trace->skel->maps.beauty_map_enter); 3991 int err = 0; 3992 unsigned int beauty_array[6]; 3993 3994 for (int i = 0, num_idx = syscalltbl__num_idx(e_machine); i < num_idx; ++i) { 3995 int prog_fd, key = syscalltbl__id_at_idx(e_machine, i); 3996 3997 if (!trace__syscall_enabled(trace, key)) 3998 continue; 3999 4000 trace__init_syscall_bpf_progs(trace, e_machine, key); 4001 4002 // It'll get at least the "!raw_syscalls:unaugmented" 4003 prog_fd = trace__bpf_prog_sys_enter_fd(trace, e_machine, key); 4004 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY); 4005 if (err) 4006 break; 4007 prog_fd = trace__bpf_prog_sys_exit_fd(trace, e_machine, key); 4008 err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY); 4009 if (err) 4010 break; 4011 4012 /* use beauty_map to tell BPF how many bytes to collect, set beauty_map's value here */ 4013 memset(beauty_array, 0, sizeof(beauty_array)); 4014 err = trace__bpf_sys_enter_beauty_map(trace, e_machine, key, (unsigned int *)beauty_array); 4015 if (err) 4016 continue; 4017 err = bpf_map_update_elem(beauty_map_fd, &key, beauty_array, BPF_ANY); 4018 if (err) 4019 break; 4020 } 4021 4022 /* 4023 * Now lets do a second pass looking for enabled syscalls without 4024 * an augmenter that have a signature that is a superset of another 4025 * syscall with an augmenter so that we can auto-reuse it. 4026 * 4027 * I.e. if we have an augmenter for the "open" syscall that has 4028 * this signature: 4029 * 4030 * int open(const char *pathname, int flags, mode_t mode); 4031 * 4032 * I.e. that will collect just the first string argument, then we 4033 * can reuse it for the 'creat' syscall, that has this signature: 4034 * 4035 * int creat(const char *pathname, mode_t mode); 4036 * 4037 * and for: 4038 * 4039 * int stat(const char *pathname, struct stat *statbuf); 4040 * int lstat(const char *pathname, struct stat *statbuf); 4041 * 4042 * Because the 'open' augmenter will collect the first arg as a string, 4043 * and leave alone all the other args, which already helps with 4044 * beautifying 'stat' and 'lstat''s pathname arg. 
4045 * 4046 * Then, in time, when 'stat' gets an augmenter that collects both 4047 * first and second arg (this one on the raw_syscalls:sys_exit prog 4048 * array tail call, then that one will be used. 4049 */ 4050 for (int i = 0, num_idx = syscalltbl__num_idx(e_machine); i < num_idx; ++i) { 4051 int key = syscalltbl__id_at_idx(e_machine, i); 4052 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, key); 4053 struct bpf_program *pair_prog; 4054 int prog_fd; 4055 4056 if (sc == NULL || sc->bpf_prog.sys_enter == NULL) 4057 continue; 4058 4059 /* 4060 * For now we're just reusing the sys_enter prog, and if it 4061 * already has an augmenter, we don't need to find one. 4062 */ 4063 if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented) 4064 continue; 4065 4066 /* 4067 * Look at all the other syscalls for one that has a signature 4068 * that is close enough that we can share: 4069 */ 4070 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc); 4071 if (pair_prog == NULL) 4072 continue; 4073 4074 sc->bpf_prog.sys_enter = pair_prog; 4075 4076 /* 4077 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter 4078 * with the fd for the program we're reusing: 4079 */ 4080 prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter); 4081 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY); 4082 if (err) 4083 break; 4084 } 4085 4086 return err; 4087 } 4088 #endif // HAVE_BPF_SKEL 4089 4090 static int trace__set_ev_qualifier_filter(struct trace *trace) 4091 { 4092 if (trace->syscalls.events.sys_enter) 4093 return trace__set_ev_qualifier_tp_filter(trace); 4094 return 0; 4095 } 4096 4097 static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused, 4098 size_t npids __maybe_unused, pid_t *pids __maybe_unused) 4099 { 4100 int err = 0; 4101 #ifdef HAVE_LIBBPF_SUPPORT 4102 bool value = true; 4103 int map_fd = bpf_map__fd(map); 4104 size_t i; 4105 4106 for (i = 0; i < npids; ++i) { 4107 err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY); 4108 if (err) 4109 break; 4110 } 4111 #endif 4112 return err; 4113 } 4114 4115 static int trace__set_filter_loop_pids(struct trace *trace) 4116 { 4117 unsigned int nr = 1, err; 4118 pid_t pids[32] = { 4119 getpid(), 4120 }; 4121 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]); 4122 4123 while (thread && nr < ARRAY_SIZE(pids)) { 4124 struct thread *parent = machine__find_thread(trace->host, 4125 thread__ppid(thread), 4126 thread__ppid(thread)); 4127 4128 if (parent == NULL) 4129 break; 4130 4131 if (!strcmp(thread__comm_str(parent), "sshd") || 4132 strstarts(thread__comm_str(parent), "gnome-terminal")) { 4133 pids[nr++] = thread__tid(parent); 4134 thread__put(parent); 4135 break; 4136 } 4137 thread__put(thread); 4138 thread = parent; 4139 } 4140 thread__put(thread); 4141 4142 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids); 4143 if (!err && trace->filter_pids.map) 4144 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids); 4145 4146 return err; 4147 } 4148 4149 static int trace__set_filter_pids(struct trace *trace) 4150 { 4151 int err = 0; 4152 /* 4153 * Better not use !target__has_task() here because we need to cover the 4154 * case where no threads were specified in the command line, but a 4155 * workload was, and in that case we will fill in the thread_map when 4156 * we fork the workload in evlist__prepare_workload. 
	 */
	if (trace->filter_pids.nr > 0) {
		err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
						    trace->filter_pids.entries);
		if (!err && trace->filter_pids.map) {
			err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
						       trace->filter_pids.entries);
		}
	} else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
		err = trace__set_filter_loop_pids(trace);
	}

	return err;
}

static int __trace__deliver_event(struct trace *trace, union perf_event *event)
{
	struct evlist *evlist = trace->evlist;
	struct perf_sample sample;
	int err;

	perf_sample__init(&sample, /*all=*/false);
	err = evlist__parse_sample(evlist, event, &sample);
	if (err)
		fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
	else
		trace__handle_event(trace, event, &sample);

	perf_sample__exit(&sample);
	return 0;
}

static int __trace__flush_events(struct trace *trace)
{
	u64 first = ordered_events__first_time(&trace->oe.data);
	u64 flush = trace->oe.last - NSEC_PER_SEC;

	/* Is there something to flush? */
	if (first && first < flush)
		return ordered_events__flush_time(&trace->oe.data, flush);

	return 0;
}

static int trace__flush_events(struct trace *trace)
{
	return !trace->sort_events ? 0 : __trace__flush_events(trace);
}

static int trace__deliver_event(struct trace *trace, union perf_event *event)
{
	int err;

	if (!trace->sort_events)
		return __trace__deliver_event(trace, event);

	err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
	if (err && err != -1)
		return err;

	err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL);
	if (err)
		return err;

	return trace__flush_events(trace);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct trace *trace = container_of(oe, struct trace, oe.data);

	return __trace__deliver_event(trace, event->event);
}

static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg,
								   char **type)
{
	struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);
	const struct tep_event *tp_format;

	if (!fmt)
		return NULL;

	tp_format = evsel__tp_format(evsel);
	if (!tp_format)
		return NULL;

	for (const struct tep_format_field *field = tp_format->format.fields; field;
	     field = field->next, ++fmt) {
		if (strcmp(field->name, arg) == 0) {
			*type = field->type;
			return fmt;
		}
	}

	return NULL;
}

static int trace__expand_filter(struct trace *trace, struct evsel *evsel)
{
	char *tok, *left = evsel->filter, *new_filter = evsel->filter;

	while ((tok = strpbrk(left, "=<>!")) != NULL) {
		char *right = tok + 1, *right_end;

		if (*right == '=')
			++right;

		while (isspace(*right))
			++right;

		if (*right == '\0')
			break;

		while (!isalpha(*left))
			if (++left == tok) {
				/*
				 * Bail out: we can't find the name of the argument
				 * being used in the filter; let the filter be set
				 * as-is, it will fail later.
				 */
				return 0;
			}

		right_end = right + 1;
		while (isalnum(*right_end) || *right_end == '_' || *right_end == '|')
			++right_end;

		if (isalpha(*right)) {
			struct syscall_arg_fmt *fmt;
			int left_size = tok - left,
			    right_size = right_end - right;
			char arg[128], *type;

			while (isspace(left[left_size - 1]))
				--left_size;

			scnprintf(arg, sizeof(arg), "%.*s", left_size, left);

			fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg, &type);
			if (fmt == NULL) {
				pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
				       arg, evsel->name, evsel->filter);
				return -1;
			}

			pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ",
				  arg, (int)(right - tok), tok, right_size, right);

			if (fmt->strtoul) {
				u64 val;
				struct syscall_arg syscall_arg = {
					.trace = trace,
					.fmt   = fmt,
					.type_name = type,
					.parm = fmt->parm,
				};

				if (fmt->strtoul(right, right_size, &syscall_arg, &val)) {
					char *n, expansion[19];
					int expansion_length = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val);
					int expansion_offset = right - new_filter;

					pr_debug("%s", expansion);

					if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) {
						pr_debug(" out of memory!\n");
						free(new_filter);
						return -1;
					}
					if (new_filter != evsel->filter)
						free(new_filter);
					left = n + expansion_offset + expansion_length;
					new_filter = n;
				} else {
					pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n",
					       right_size, right, arg, evsel->name, evsel->filter);
					return -1;
				}
			} else {
				pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n",
				       arg, evsel->name, evsel->filter);
				return -1;
			}

			pr_debug("\n");
		} else {
			left = right_end;
		}
	}

	if (new_filter != evsel->filter) {
		pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
		evsel__set_filter(evsel, new_filter);
		free(new_filter);
	}

	return 0;
}

static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
{
	struct evlist *evlist = trace->evlist;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		if (trace__expand_filter(trace, evsel)) {
			*err_evsel = evsel;
			return -1;
		}
	}

	return 0;
}

static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct evlist *evlist = trace->evlist;
	struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
	int err = -1, i;
	unsigned long before;
	const bool forks = argc > 0;
	bool draining = false;

	trace->live = true;

	if (trace->summary_bpf) {
		if (trace_prepare_bpf_summary(trace->summary_mode) < 0)
			goto out_delete_evlist;

		if (trace->summary_only)
			goto create_maps;
	}

	if (!trace->raw_augmented_syscalls) {
		if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
			goto out_error_raw_syscalls;

		if (trace->trace_syscalls)
			trace->vfs_getname = evlist__add_vfs_getname(evlist);
	}

	if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
		pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
		if (pgfault_maj == NULL)
			goto out_error_mem;
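		/*
		 * evsel__new_pgfault() sets sample_period = 1, i.e. every
		 * major fault gets sampled; here it also gets the same
		 * callchain configuration as the other events.
		 */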
		evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
		evlist__add(evlist, pgfault_maj);
	}

	if ((trace->trace_pgfaults & TRACE_PFMIN)) {
		pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
		if (pgfault_min == NULL)
			goto out_error_mem;
		evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
		evlist__add(evlist, pgfault_min);
	}

	/* Enable ignoring missing threads when -p option is defined. */
	trace->opts.ignore_missing_thread = trace->opts.target.pid;

	if (trace->sched &&
	    evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime))
		goto out_error_sched_stat_runtime;
	/*
	 * If a global cgroup was set, apply it to all the events without an
	 * explicit cgroup. I.e.:
	 *
	 * 	trace -G A -e sched:*switch
	 *
	 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
	 * _and_ sched:sched_switch to the 'A' cgroup, while:
	 *
	 * 	trace -e sched:*switch -G A
	 *
	 * will only set the sched:sched_switch event to the 'A' cgroup, all the
	 * other events (raw_syscalls:sys_{enter,exit}, etc) are left "without"
	 * a cgroup (on the root cgroup, sys wide, etc).
	 *
	 * Multiple cgroups:
	 *
	 * 	trace -G A -e sched:*switch -G B
	 *
	 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
	 * to the 'B' cgroup.
	 *
	 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
	 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
	 */
	if (trace->cgroup)
		evlist__set_default_cgroup(trace->evlist, trace->cgroup);

create_maps:
	err = evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		fprintf(trace->output, "Problems initializing symbol libraries!\n");
		goto out_delete_evlist;
	}

	if (trace->summary_mode == SUMMARY__BY_TOTAL && !trace->summary_bpf) {
		trace->syscall_stats = alloc_syscall_stats();
		if (trace->syscall_stats == NULL)
			goto out_delete_evlist;
	}

	evlist__config(evlist, &trace->opts, &callchain_param);

	if (forks) {
		err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
		if (err < 0) {
			fprintf(trace->output, "Couldn't run the workload!\n");
			goto out_delete_evlist;
		}
		workload_pid = evlist->workload.pid;
	}

	err = evlist__open(evlist);
	if (err < 0)
		goto out_error_open;
#ifdef HAVE_BPF_SKEL
	if (trace->syscalls.events.bpf_output) {
		struct perf_cpu cpu;

		/*
		 * Set up the __augmented_syscalls__ BPF map to hold for each
		 * CPU the bpf-output event's file descriptor.
		 */
		perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
			int mycpu = cpu.cpu;

			bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__,
					     &mycpu, sizeof(mycpu),
					     xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
							    mycpu, 0),
					     sizeof(__u32), BPF_ANY);
		}
	}

	if (trace->skel)
		trace->filter_pids.map = trace->skel->maps.pids_filtered;
#endif
	err = trace__set_filter_pids(trace);
	if (err < 0)
		goto out_error_mem;

#ifdef HAVE_BPF_SKEL
	if (trace->skel && trace->skel->progs.sys_enter) {
		/*
		 * TODO: Initialize for all host binary machine types, not just
		 * those matching the perf binary.
		 */
		trace__init_syscalls_bpf_prog_array_maps(trace, EM_HOST);
	}
#endif

	if (trace->ev_qualifier_ids.nr > 0) {
		err = trace__set_ev_qualifier_filter(trace);
		if (err < 0)
			goto out_errno;

		if (trace->syscalls.events.sys_exit) {
			pr_debug("event qualifier tracepoint filter: %s\n",
				 trace->syscalls.events.sys_exit->filter);
		}
	}

	/*
	 * If the "close" syscall is not traced, then we will not have the
	 * opportunity, in syscall_arg__scnprintf_close_fd(), to invalidate
	 * the fd->pathname table, and we'd end up showing the last value set
	 * by syscalls opening a pathname and associating it with a descriptor,
	 * or reading it from /proc/pid/fd/, in cases where that doesn't make
	 * sense.
	 *
	 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
	 * not in use.
	 */
	/* TODO: support for more than just perf binary machine type close. */
	trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(EM_HOST, "close"));

	err = trace__expand_filters(trace, &evsel);
	if (err)
		goto out_delete_evlist;
	err = evlist__apply_filters(evlist, &evsel, &trace->opts.target);
	if (err < 0)
		goto out_error_apply_filters;

	if (!trace->summary_only || !trace->summary_bpf) {
		err = evlist__mmap(evlist, trace->opts.mmap_pages);
		if (err < 0)
			goto out_error_mmap;
	}

	if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay)
		evlist__enable(evlist);

	if (forks)
		evlist__start_workload(evlist);

	if (trace->opts.target.initial_delay) {
		usleep(trace->opts.target.initial_delay * 1000);
		evlist__enable(evlist);
	}

	if (trace->summary_bpf)
		trace_start_bpf_summary();

	trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
				  perf_thread_map__nr(evlist->core.threads) > 1 ||
				  evlist__first(evlist)->core.attr.inherit;

	/*
	 * Now that we already used evsel->core.attr to ask the kernel to setup the
	 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in
	 * trace__resolve_callchain(), allowing per-event max-stack settings
	 * to override an explicitly set --max-stack global setting.
4582 */ 4583 evlist__for_each_entry(evlist, evsel) { 4584 if (evsel__has_callchain(evsel) && 4585 evsel->core.attr.sample_max_stack == 0) 4586 evsel->core.attr.sample_max_stack = trace->max_stack; 4587 } 4588 again: 4589 before = trace->nr_events; 4590 4591 for (i = 0; i < evlist->core.nr_mmaps; i++) { 4592 union perf_event *event; 4593 struct mmap *md; 4594 4595 md = &evlist->mmap[i]; 4596 if (perf_mmap__read_init(&md->core) < 0) 4597 continue; 4598 4599 while ((event = perf_mmap__read_event(&md->core)) != NULL) { 4600 ++trace->nr_events; 4601 4602 err = trace__deliver_event(trace, event); 4603 if (err) 4604 goto out_disable; 4605 4606 perf_mmap__consume(&md->core); 4607 4608 if (interrupted) 4609 goto out_disable; 4610 4611 if (done && !draining) { 4612 evlist__disable(evlist); 4613 draining = true; 4614 } 4615 } 4616 perf_mmap__read_done(&md->core); 4617 } 4618 4619 if (trace->nr_events == before) { 4620 int timeout = done ? 100 : -1; 4621 4622 if (!draining && evlist__poll(evlist, timeout) > 0) { 4623 if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0) 4624 draining = true; 4625 4626 goto again; 4627 } else { 4628 if (trace__flush_events(trace)) 4629 goto out_disable; 4630 } 4631 } else { 4632 goto again; 4633 } 4634 4635 out_disable: 4636 thread__zput(trace->current); 4637 4638 evlist__disable(evlist); 4639 4640 if (trace->summary_bpf) 4641 trace_end_bpf_summary(); 4642 4643 if (trace->sort_events) 4644 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL); 4645 4646 if (!err) { 4647 if (trace->summary) { 4648 if (trace->summary_bpf) 4649 trace_print_bpf_summary(trace->output); 4650 else if (trace->summary_mode == SUMMARY__BY_TOTAL) 4651 trace__fprintf_total_summary(trace, trace->output); 4652 else 4653 trace__fprintf_thread_summary(trace, trace->output); 4654 } 4655 4656 if (trace->show_tool_stats) { 4657 fprintf(trace->output, "Stats:\n " 4658 " vfs_getname : %" PRIu64 "\n" 4659 " proc_getname: %" PRIu64 "\n", 4660 trace->stats.vfs_getname, 4661 trace->stats.proc_getname); 4662 } 4663 } 4664 4665 out_delete_evlist: 4666 trace_cleanup_bpf_summary(); 4667 delete_syscall_stats(trace->syscall_stats); 4668 trace__symbols__exit(trace); 4669 evlist__free_syscall_tp_fields(evlist); 4670 evlist__delete(evlist); 4671 cgroup__put(trace->cgroup); 4672 trace->evlist = NULL; 4673 trace->live = false; 4674 return err; 4675 { 4676 char errbuf[BUFSIZ]; 4677 4678 out_error_sched_stat_runtime: 4679 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime"); 4680 goto out_error; 4681 4682 out_error_raw_syscalls: 4683 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)"); 4684 goto out_error; 4685 4686 out_error_mmap: 4687 evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf)); 4688 goto out_error; 4689 4690 out_error_open: 4691 evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf)); 4692 4693 out_error: 4694 fprintf(trace->output, "%s\n", errbuf); 4695 goto out_delete_evlist; 4696 4697 out_error_apply_filters: 4698 fprintf(trace->output, 4699 "Failed to set filter \"%s\" on event %s with %d (%s)\n", 4700 evsel->filter, evsel__name(evsel), errno, 4701 str_error_r(errno, errbuf, sizeof(errbuf))); 4702 goto out_delete_evlist; 4703 } 4704 out_error_mem: 4705 fprintf(trace->output, "Not enough memory to run!\n"); 4706 goto out_delete_evlist; 4707 4708 out_errno: 4709 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno)); 4710 goto out_delete_evlist; 4711 } 4712 4713 static int 
trace__replay(struct trace *trace)
{
	const struct evsel_str_handler handlers[] = {
		{ "probe:vfs_getname", trace__vfs_getname, },
	};
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = trace->force,
	};
	struct perf_session *session;
	struct evsel *evsel;
	int err = -1;

	perf_tool__init(&trace->tool, /*ordered_events=*/true);
	trace->tool.sample	  = trace__process_sample;
	trace->tool.mmap	  = perf_event__process_mmap;
	trace->tool.mmap2	  = perf_event__process_mmap2;
	trace->tool.comm	  = perf_event__process_comm;
	trace->tool.exit	  = perf_event__process_exit;
	trace->tool.fork	  = perf_event__process_fork;
	trace->tool.attr	  = perf_event__process_attr;
	trace->tool.tracing_data  = perf_event__process_tracing_data;
	trace->tool.build_id	  = perf_event__process_build_id;
	trace->tool.namespaces	  = perf_event__process_namespaces;

	trace->tool.ordered_events = true;
	trace->tool.ordering_requires_timestamps = true;

	/* add tid to output */
	trace->multiple_threads = true;

	session = perf_session__new(&data, &trace->tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	if (trace->opts.target.pid)
		symbol_conf.pid_list_str = strdup(trace->opts.target.pid);

	if (trace->opts.target.tid)
		symbol_conf.tid_list_str = strdup(trace->opts.target.tid);

	if (symbol__init(&session->header.env) < 0)
		goto out;

	trace->host = &session->machines.host;

	err = perf_session__set_tracepoints_handlers(session, handlers);
	if (err)
		goto out;

	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter");
	trace->syscalls.events.sys_enter = evsel;
	/* older kernels have syscalls tp versus raw_syscalls */
	if (evsel == NULL)
		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");

	if (evsel &&
	    (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
	     perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
		pr_err("Error initializing the raw_syscalls:sys_enter event\n");
		goto out;
	}

	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit");
	trace->syscalls.events.sys_exit = evsel;
	if (evsel == NULL)
		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
	if (evsel &&
	    (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
	     perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
		pr_err("Error initializing the raw_syscalls:sys_exit event\n");
		goto out;
	}

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
		    (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
			evsel->handler = trace__pgfault;
	}

	if (trace->summary_mode == SUMMARY__BY_TOTAL) {
		trace->syscall_stats = alloc_syscall_stats();
		if (trace->syscall_stats == NULL)
			goto out;
	}

	setup_pager();

	err = perf_session__process_events(session);
	if (err)
		pr_err("Failed to process events, error %d\n", err);
	else if (trace->summary)
		trace__fprintf_thread_summary(trace, trace->output);

out:
	delete_syscall_stats(trace->syscall_stats);
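	/*
	 * This also releases the evlist that was read from the perf.data
	 * header, i.e. the evsels we attached handlers to above.
	 */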
perf_session__delete(session); 4814 4815 return err; 4816 } 4817 4818 static size_t trace__fprintf_summary_header(FILE *fp) 4819 { 4820 size_t printed; 4821 4822 printed = fprintf(fp, "\n Summary of events:\n\n"); 4823 4824 return printed; 4825 } 4826 4827 struct syscall_entry { 4828 struct syscall_stats *stats; 4829 double msecs; 4830 int syscall; 4831 }; 4832 4833 static int entry_cmp(const void *e1, const void *e2) 4834 { 4835 const struct syscall_entry *entry1 = e1; 4836 const struct syscall_entry *entry2 = e2; 4837 4838 return entry1->msecs > entry2->msecs ? -1 : 1; 4839 } 4840 4841 static struct syscall_entry *syscall__sort_stats(struct hashmap *syscall_stats) 4842 { 4843 struct syscall_entry *entry; 4844 struct hashmap_entry *pos; 4845 unsigned bkt, i, nr; 4846 4847 nr = syscall_stats->sz; 4848 entry = malloc(nr * sizeof(*entry)); 4849 if (entry == NULL) 4850 return NULL; 4851 4852 i = 0; 4853 hashmap__for_each_entry(syscall_stats, pos, bkt) { 4854 struct syscall_stats *ss = pos->pvalue; 4855 struct stats *st = &ss->stats; 4856 4857 entry[i].stats = ss; 4858 entry[i].msecs = (u64)st->n * (avg_stats(st) / NSEC_PER_MSEC); 4859 entry[i].syscall = pos->key; 4860 i++; 4861 } 4862 assert(i == nr); 4863 4864 qsort(entry, nr, sizeof(*entry), entry_cmp); 4865 return entry; 4866 } 4867 4868 static size_t syscall__dump_stats(struct trace *trace, int e_machine, FILE *fp, 4869 struct hashmap *syscall_stats) 4870 { 4871 size_t printed = 0; 4872 struct syscall *sc; 4873 struct syscall_entry *entries; 4874 4875 entries = syscall__sort_stats(syscall_stats); 4876 if (entries == NULL) 4877 return 0; 4878 4879 printed += fprintf(fp, "\n"); 4880 4881 printed += fprintf(fp, " syscall calls errors total min avg max stddev\n"); 4882 printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n"); 4883 printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n"); 4884 4885 for (size_t i = 0; i < syscall_stats->sz; i++) { 4886 struct syscall_entry *entry = &entries[i]; 4887 struct syscall_stats *stats = entry->stats; 4888 4889 if (stats) { 4890 double min = (double)(stats->stats.min) / NSEC_PER_MSEC; 4891 double max = (double)(stats->stats.max) / NSEC_PER_MSEC; 4892 double avg = avg_stats(&stats->stats); 4893 double pct; 4894 u64 n = (u64)stats->stats.n; 4895 4896 pct = avg ? 
			      100.0 * stddev_stats(&stats->stats) / avg : 0.0;

			avg /= NSEC_PER_MSEC;

			sc = trace__syscall_info(trace, /*evsel=*/NULL, e_machine, entry->syscall);
			if (!sc)
				continue;

			printed += fprintf(fp, "   %-15s", sc->name);
			printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f",
					   n, stats->nr_failures, entry->msecs, min, avg);
			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);

			if (trace->errno_summary && stats->nr_failures) {
				int e;

				for (e = 0; e < stats->max_errno; ++e) {
					if (stats->errnos[e] != 0)
						fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]);
				}
			}
		}
	}

	free(entries);
	printed += fprintf(fp, "\n\n");

	return printed;
}

static size_t thread__dump_stats(struct thread_trace *ttrace,
				 struct trace *trace, int e_machine, FILE *fp)
{
	return syscall__dump_stats(trace, e_machine, fp, ttrace->syscall_stats);
}

static size_t system__dump_stats(struct trace *trace, int e_machine, FILE *fp)
{
	return syscall__dump_stats(trace, e_machine, fp, trace->syscall_stats);
}

static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
{
	size_t printed = 0;
	struct thread_trace *ttrace = thread__priv(thread);
	int e_machine = thread__e_machine(thread, trace->host);
	double ratio;

	if (ttrace == NULL)
		return 0;

	ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;

	printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread__tid(thread));
	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
	printed += fprintf(fp, "%.1f%%", ratio);
	if (ttrace->pfmaj)
		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
	if (ttrace->pfmin)
		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
	if (trace->sched)
		printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
	else if (fputc('\n', fp) != EOF)
		++printed;

	printed += thread__dump_stats(ttrace, trace, e_machine, fp);

	return printed;
}

static unsigned long thread__nr_events(struct thread_trace *ttrace)
{
	return ttrace ? ttrace->nr_events : 0;
}

static int trace_nr_events_cmp(void *priv __maybe_unused,
			       const struct list_head *la,
			       const struct list_head *lb)
{
	struct thread_list *a = list_entry(la, struct thread_list, list);
	struct thread_list *b = list_entry(lb, struct thread_list, list);
	unsigned long a_nr_events = thread__nr_events(thread__priv(a->thread));
	unsigned long b_nr_events = thread__nr_events(thread__priv(b->thread));

	if (a_nr_events != b_nr_events)
		return a_nr_events < b_nr_events ? -1 : 1;

	/* Identical number of events, place smaller tids first. */
	return thread__tid(a->thread) < thread__tid(b->thread)
		? -1
		: (thread__tid(a->thread) > thread__tid(b->thread) ?
1 : 0); 4986 } 4987 4988 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp) 4989 { 4990 size_t printed = trace__fprintf_summary_header(fp); 4991 LIST_HEAD(threads); 4992 4993 if (machine__thread_list(trace->host, &threads) == 0) { 4994 struct thread_list *pos; 4995 4996 list_sort(NULL, &threads, trace_nr_events_cmp); 4997 4998 list_for_each_entry(pos, &threads, list) 4999 printed += trace__fprintf_thread(fp, pos->thread, trace); 5000 } 5001 thread_list__delete(&threads); 5002 return printed; 5003 } 5004 5005 static size_t trace__fprintf_total_summary(struct trace *trace, FILE *fp) 5006 { 5007 size_t printed = trace__fprintf_summary_header(fp); 5008 5009 printed += fprintf(fp, " total, "); 5010 printed += fprintf(fp, "%lu events", trace->nr_events); 5011 5012 if (trace->pfmaj) 5013 printed += fprintf(fp, ", %lu majfaults", trace->pfmaj); 5014 if (trace->pfmin) 5015 printed += fprintf(fp, ", %lu minfaults", trace->pfmin); 5016 if (trace->sched) 5017 printed += fprintf(fp, ", %.3f msec\n", trace->runtime_ms); 5018 else if (fputc('\n', fp) != EOF) 5019 ++printed; 5020 5021 /* TODO: get all system e_machines. */ 5022 printed += system__dump_stats(trace, EM_HOST, fp); 5023 5024 return printed; 5025 } 5026 5027 static int trace__set_duration(const struct option *opt, const char *str, 5028 int unset __maybe_unused) 5029 { 5030 struct trace *trace = opt->value; 5031 5032 trace->duration_filter = atof(str); 5033 return 0; 5034 } 5035 5036 static int trace__set_filter_pids_from_option(const struct option *opt, const char *str, 5037 int unset __maybe_unused) 5038 { 5039 int ret = -1; 5040 size_t i; 5041 struct trace *trace = opt->value; 5042 /* 5043 * FIXME: introduce a intarray class, plain parse csv and create a 5044 * { int nr, int entries[] } struct... 5045 */ 5046 struct intlist *list = intlist__new(str); 5047 5048 if (list == NULL) 5049 return -1; 5050 5051 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1; 5052 trace->filter_pids.entries = calloc(i, sizeof(pid_t)); 5053 5054 if (trace->filter_pids.entries == NULL) 5055 goto out; 5056 5057 trace->filter_pids.entries[0] = getpid(); 5058 5059 for (i = 1; i < trace->filter_pids.nr; ++i) 5060 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i; 5061 5062 intlist__delete(list); 5063 ret = 0; 5064 out: 5065 return ret; 5066 } 5067 5068 static int trace__open_output(struct trace *trace, const char *filename) 5069 { 5070 struct stat st; 5071 5072 if (!stat(filename, &st) && st.st_size) { 5073 char oldname[PATH_MAX]; 5074 5075 scnprintf(oldname, sizeof(oldname), "%s.old", filename); 5076 unlink(oldname); 5077 rename(filename, oldname); 5078 } 5079 5080 trace->output = fopen(filename, "w"); 5081 5082 return trace->output == NULL ? 
-errno : 0; 5083 } 5084 5085 static int parse_pagefaults(const struct option *opt, const char *str, 5086 int unset __maybe_unused) 5087 { 5088 int *trace_pgfaults = opt->value; 5089 5090 if (strcmp(str, "all") == 0) 5091 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN; 5092 else if (strcmp(str, "maj") == 0) 5093 *trace_pgfaults |= TRACE_PFMAJ; 5094 else if (strcmp(str, "min") == 0) 5095 *trace_pgfaults |= TRACE_PFMIN; 5096 else 5097 return -1; 5098 5099 return 0; 5100 } 5101 5102 static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler) 5103 { 5104 struct evsel *evsel; 5105 5106 evlist__for_each_entry(evlist, evsel) { 5107 if (evsel->handler == NULL) 5108 evsel->handler = handler; 5109 } 5110 } 5111 5112 static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name) 5113 { 5114 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel); 5115 5116 if (fmt) { 5117 const struct syscall_fmt *scfmt = syscall_fmt__find(name); 5118 5119 if (scfmt) { 5120 const struct tep_event *tp_format = evsel__tp_format(evsel); 5121 5122 if (tp_format) { 5123 int skip = 0; 5124 5125 if (strcmp(tp_format->format.fields->name, "__syscall_nr") == 0 || 5126 strcmp(tp_format->format.fields->name, "nr") == 0) 5127 ++skip; 5128 5129 memcpy(fmt + skip, scfmt->arg, 5130 (tp_format->format.nr_fields - skip) * sizeof(*fmt)); 5131 } 5132 } 5133 } 5134 } 5135 5136 static int evlist__set_syscall_tp_fields(struct evlist *evlist, bool *use_btf) 5137 { 5138 struct evsel *evsel; 5139 5140 evlist__for_each_entry(evlist, evsel) { 5141 const struct tep_event *tp_format; 5142 5143 if (evsel->priv) 5144 continue; 5145 5146 tp_format = evsel__tp_format(evsel); 5147 if (!tp_format) 5148 continue; 5149 5150 if (strcmp(tp_format->system, "syscalls")) { 5151 evsel__init_tp_arg_scnprintf(evsel, use_btf); 5152 continue; 5153 } 5154 5155 if (evsel__init_syscall_tp(evsel)) 5156 return -1; 5157 5158 if (!strncmp(tp_format->name, "sys_enter_", 10)) { 5159 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 5160 5161 if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64))) 5162 return -1; 5163 5164 evsel__set_syscall_arg_fmt(evsel, 5165 tp_format->name + sizeof("sys_enter_") - 1); 5166 } else if (!strncmp(tp_format->name, "sys_exit_", 9)) { 5167 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 5168 5169 if (__tp_field__init_uint(&sc->ret, sizeof(u64), 5170 sc->id.offset + sizeof(u64), 5171 evsel->needs_swap)) 5172 return -1; 5173 5174 evsel__set_syscall_arg_fmt(evsel, 5175 tp_format->name + sizeof("sys_exit_") - 1); 5176 } 5177 } 5178 5179 return 0; 5180 } 5181 5182 /* 5183 * XXX: Hackish, just splitting the combined -e+--event (syscalls 5184 * (raw_syscalls:{sys_{enter,exit}} + events (tracepoints, HW, SW, etc) to use 5185 * existing facilities unchanged (trace->ev_qualifier + parse_options()). 5186 * 5187 * It'd be better to introduce a parse_options() variant that would return a 5188 * list with the terms it didn't match to an event... 
5189 */ 5190 static int trace__parse_events_option(const struct option *opt, const char *str, 5191 int unset __maybe_unused) 5192 { 5193 struct trace *trace = (struct trace *)opt->value; 5194 const char *s = str; 5195 char *sep = NULL, *lists[2] = { NULL, NULL, }; 5196 int len = strlen(str) + 1, err = -1, list, idx; 5197 char *strace_groups_dir = system_path(STRACE_GROUPS_DIR); 5198 char group_name[PATH_MAX]; 5199 const struct syscall_fmt *fmt; 5200 5201 if (strace_groups_dir == NULL) 5202 return -1; 5203 5204 if (*s == '!') { 5205 ++s; 5206 trace->not_ev_qualifier = true; 5207 } 5208 5209 while (1) { 5210 if ((sep = strchr(s, ',')) != NULL) 5211 *sep = '\0'; 5212 5213 list = 0; 5214 /* TODO: support for more than just perf binary machine type syscalls. */ 5215 if (syscalltbl__id(EM_HOST, s) >= 0 || 5216 syscalltbl__strglobmatch_first(EM_HOST, s, &idx) >= 0) { 5217 list = 1; 5218 goto do_concat; 5219 } 5220 5221 fmt = syscall_fmt__find_by_alias(s); 5222 if (fmt != NULL) { 5223 list = 1; 5224 s = fmt->name; 5225 } else { 5226 path__join(group_name, sizeof(group_name), strace_groups_dir, s); 5227 if (access(group_name, R_OK) == 0) 5228 list = 1; 5229 } 5230 do_concat: 5231 if (lists[list]) { 5232 sprintf(lists[list] + strlen(lists[list]), ",%s", s); 5233 } else { 5234 lists[list] = malloc(len); 5235 if (lists[list] == NULL) 5236 goto out; 5237 strcpy(lists[list], s); 5238 } 5239 5240 if (!sep) 5241 break; 5242 5243 *sep = ','; 5244 s = sep + 1; 5245 } 5246 5247 if (lists[1] != NULL) { 5248 struct strlist_config slist_config = { 5249 .dirname = strace_groups_dir, 5250 }; 5251 5252 trace->ev_qualifier = strlist__new(lists[1], &slist_config); 5253 if (trace->ev_qualifier == NULL) { 5254 fputs("Not enough memory to parse event qualifier", trace->output); 5255 goto out; 5256 } 5257 5258 if (trace__validate_ev_qualifier(trace)) 5259 goto out; 5260 trace->trace_syscalls = true; 5261 } 5262 5263 err = 0; 5264 5265 if (lists[0]) { 5266 struct parse_events_option_args parse_events_option_args = { 5267 .evlistp = &trace->evlist, 5268 }; 5269 struct option o = { 5270 .value = &parse_events_option_args, 5271 }; 5272 err = parse_events_option(&o, lists[0], 0); 5273 } 5274 out: 5275 free(strace_groups_dir); 5276 free(lists[0]); 5277 free(lists[1]); 5278 if (sep) 5279 *sep = ','; 5280 5281 return err; 5282 } 5283 5284 static int trace__parse_cgroups(const struct option *opt, const char *str, int unset) 5285 { 5286 struct trace *trace = opt->value; 5287 5288 if (!list_empty(&trace->evlist->core.entries)) { 5289 struct option o = { 5290 .value = &trace->evlist, 5291 }; 5292 return parse_cgroups(&o, str, unset); 5293 } 5294 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str); 5295 5296 return 0; 5297 } 5298 5299 static int trace__parse_summary_mode(const struct option *opt, const char *str, 5300 int unset __maybe_unused) 5301 { 5302 struct trace *trace = opt->value; 5303 5304 if (!strcmp(str, "thread")) { 5305 trace->summary_mode = SUMMARY__BY_THREAD; 5306 } else if (!strcmp(str, "total")) { 5307 trace->summary_mode = SUMMARY__BY_TOTAL; 5308 } else if (!strcmp(str, "cgroup")) { 5309 trace->summary_mode = SUMMARY__BY_CGROUP; 5310 } else { 5311 pr_err("Unknown summary mode: %s\n", str); 5312 return -1; 5313 } 5314 5315 return 0; 5316 } 5317 5318 static int trace__config(const char *var, const char *value, void *arg) 5319 { 5320 struct trace *trace = arg; 5321 int err = 0; 5322 5323 if (!strcmp(var, "trace.add_events")) { 5324 trace->perfconfig_events = strdup(value); 5325 if (trace->perfconfig_events == 
NULL) { 5326 pr_err("Not enough memory for %s\n", "trace.add_events"); 5327 return -1; 5328 } 5329 } else if (!strcmp(var, "trace.show_timestamp")) { 5330 trace->show_tstamp = perf_config_bool(var, value); 5331 } else if (!strcmp(var, "trace.show_duration")) { 5332 trace->show_duration = perf_config_bool(var, value); 5333 } else if (!strcmp(var, "trace.show_arg_names")) { 5334 trace->show_arg_names = perf_config_bool(var, value); 5335 if (!trace->show_arg_names) 5336 trace->show_zeros = true; 5337 } else if (!strcmp(var, "trace.show_zeros")) { 5338 bool new_show_zeros = perf_config_bool(var, value); 5339 if (!trace->show_arg_names && !new_show_zeros) { 5340 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n"); 5341 goto out; 5342 } 5343 trace->show_zeros = new_show_zeros; 5344 } else if (!strcmp(var, "trace.show_prefix")) { 5345 trace->show_string_prefix = perf_config_bool(var, value); 5346 } else if (!strcmp(var, "trace.no_inherit")) { 5347 trace->opts.no_inherit = perf_config_bool(var, value); 5348 } else if (!strcmp(var, "trace.args_alignment")) { 5349 int args_alignment = 0; 5350 if (perf_config_int(&args_alignment, var, value) == 0) 5351 trace->args_alignment = args_alignment; 5352 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) { 5353 if (strcasecmp(value, "libtraceevent") == 0) 5354 trace->libtraceevent_print = true; 5355 else if (strcasecmp(value, "libbeauty") == 0) 5356 trace->libtraceevent_print = false; 5357 } 5358 out: 5359 return err; 5360 } 5361 5362 static void trace__exit(struct trace *trace) 5363 { 5364 thread__zput(trace->current); 5365 strlist__delete(trace->ev_qualifier); 5366 zfree(&trace->ev_qualifier_ids.entries); 5367 if (trace->syscalls.table) { 5368 for (size_t i = 0; i < trace->syscalls.table_size; i++) 5369 syscall__delete(trace->syscalls.table[i]); 5370 zfree(&trace->syscalls.table); 5371 } 5372 zfree(&trace->perfconfig_events); 5373 evlist__delete(trace->evlist); 5374 trace->evlist = NULL; 5375 ordered_events__free(&trace->oe.data); 5376 #ifdef HAVE_LIBBPF_SUPPORT 5377 btf__free(trace->btf); 5378 trace->btf = NULL; 5379 #endif 5380 } 5381 5382 #ifdef HAVE_BPF_SKEL 5383 static int bpf__setup_bpf_output(struct evlist *evlist) 5384 { 5385 int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/"); 5386 5387 if (err) 5388 pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n"); 5389 5390 return err; 5391 } 5392 #endif 5393 5394 int cmd_trace(int argc, const char **argv) 5395 { 5396 const char *trace_usage[] = { 5397 "perf trace [<options>] [<command>]", 5398 "perf trace [<options>] -- <command> [<options>]", 5399 "perf trace record [<options>] [<command>]", 5400 "perf trace record [<options>] -- <command> [<options>]", 5401 NULL 5402 }; 5403 struct trace trace = { 5404 .opts = { 5405 .target = { 5406 .uses_mmap = true, 5407 }, 5408 .user_freq = UINT_MAX, 5409 .user_interval = ULLONG_MAX, 5410 .no_buffering = true, 5411 .mmap_pages = UINT_MAX, 5412 }, 5413 .output = stderr, 5414 .show_comm = true, 5415 .show_tstamp = true, 5416 .show_duration = true, 5417 .show_arg_names = true, 5418 .args_alignment = 70, 5419 .trace_syscalls = false, 5420 .kernel_syscallchains = false, 5421 .max_stack = UINT_MAX, 5422 .max_events = ULONG_MAX, 5423 }; 5424 const char *output_name = NULL; 5425 const struct option trace_options[] = { 5426 OPT_CALLBACK('e', "event", &trace, "event", 5427 "event/syscall selector. 
use 'perf list' to list available events", 5428 trace__parse_events_option), 5429 OPT_CALLBACK(0, "filter", &trace.evlist, "filter", 5430 "event filter", parse_filter), 5431 OPT_BOOLEAN(0, "comm", &trace.show_comm, 5432 "show the thread COMM next to its id"), 5433 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"), 5434 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace", 5435 trace__parse_events_option), 5436 OPT_STRING('o', "output", &output_name, "file", "output file name"), 5437 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"), 5438 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid", 5439 "trace events on existing process id"), 5440 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid", 5441 "trace events on existing thread id"), 5442 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids", 5443 "pids to filter (by the kernel)", trace__set_filter_pids_from_option), 5444 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide, 5445 "system-wide collection from all CPUs"), 5446 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu", 5447 "list of cpus to monitor"), 5448 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit, 5449 "child tasks do not inherit counters"), 5450 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages", 5451 "number of mmap data pages", evlist__parse_mmap_pages), 5452 OPT_STRING('u', "uid", &trace.uid_str, "user", "user to profile"), 5453 OPT_CALLBACK(0, "duration", &trace, "float", 5454 "show only events with duration > N.M ms", 5455 trace__set_duration), 5456 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"), 5457 OPT_INCR('v', "verbose", &verbose, "be more verbose"), 5458 OPT_BOOLEAN('T', "time", &trace.full_time, 5459 "Show full timestamp, not time relative to first start"), 5460 OPT_BOOLEAN(0, "failure", &trace.failure_only, 5461 "Show only syscalls that failed"), 5462 OPT_BOOLEAN('s', "summary", &trace.summary_only, 5463 "Show only syscall summary with statistics"), 5464 OPT_BOOLEAN('S', "with-summary", &trace.summary, 5465 "Show all syscalls and summary with statistics"), 5466 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary, 5467 "Show errno stats per syscall, use with -s or -S"), 5468 OPT_CALLBACK(0, "summary-mode", &trace, "mode", 5469 "How to show summary: select thread (default), total or cgroup", 5470 trace__parse_summary_mode), 5471 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min", 5472 "Trace pagefaults", parse_pagefaults, "maj"), 5473 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"), 5474 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"), 5475 OPT_CALLBACK(0, "call-graph", &trace.opts, 5476 "record_mode[,record_size]", record_callchain_help, 5477 &record_parse_callchain_opt), 5478 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print, 5479 "Use libtraceevent to print the tracepoint arguments."), 5480 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains, 5481 "Show the kernel callchains on the syscall exit path"), 5482 OPT_ULONG(0, "max-events", &trace.max_events, 5483 "Set the maximum number of events to print, exit after that is reached. 
"), 5484 OPT_UINTEGER(0, "min-stack", &trace.min_stack, 5485 "Set the minimum stack depth when parsing the callchain, " 5486 "anything below the specified depth will be ignored."), 5487 OPT_UINTEGER(0, "max-stack", &trace.max_stack, 5488 "Set the maximum stack depth when parsing the callchain, " 5489 "anything beyond the specified depth will be ignored. " 5490 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)), 5491 OPT_BOOLEAN(0, "sort-events", &trace.sort_events, 5492 "Sort batch of events before processing, use if getting out of order events"), 5493 OPT_BOOLEAN(0, "print-sample", &trace.print_sample, 5494 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"), 5495 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout, 5496 "per thread proc mmap processing timeout in ms"), 5497 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only", 5498 trace__parse_cgroups), 5499 OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay, 5500 "ms to wait before starting measurement after program " 5501 "start"), 5502 OPT_BOOLEAN(0, "force-btf", &trace.force_btf, "Prefer btf_dump general pretty printer" 5503 "to customized ones"), 5504 OPT_BOOLEAN(0, "bpf-summary", &trace.summary_bpf, "Summary syscall stats in BPF"), 5505 OPTS_EVSWITCH(&trace.evswitch), 5506 OPT_END() 5507 }; 5508 bool __maybe_unused max_stack_user_set = true; 5509 bool mmap_pages_user_set = true; 5510 struct evsel *evsel; 5511 const char * const trace_subcommands[] = { "record", NULL }; 5512 int err = -1; 5513 char bf[BUFSIZ]; 5514 struct sigaction sigchld_act; 5515 5516 signal(SIGSEGV, sighandler_dump_stack); 5517 signal(SIGFPE, sighandler_dump_stack); 5518 signal(SIGINT, sighandler_interrupt); 5519 5520 memset(&sigchld_act, 0, sizeof(sigchld_act)); 5521 sigchld_act.sa_flags = SA_SIGINFO; 5522 sigchld_act.sa_sigaction = sighandler_chld; 5523 sigaction(SIGCHLD, &sigchld_act, NULL); 5524 5525 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace); 5526 ordered_events__set_copy_on_queue(&trace.oe.data, true); 5527 5528 trace.evlist = evlist__new(); 5529 5530 if (trace.evlist == NULL) { 5531 pr_err("Not enough memory to run!\n"); 5532 err = -ENOMEM; 5533 goto out; 5534 } 5535 5536 /* 5537 * Parsing .perfconfig may entail creating a BPF event, that may need 5538 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting 5539 * is too small. This affects just this process, not touching the 5540 * global setting. If it fails we'll get something in 'perf trace -v' 5541 * to help diagnose the problem. 5542 */ 5543 rlimit__bump_memlock(); 5544 5545 err = perf_config(trace__config, &trace); 5546 if (err) 5547 goto out; 5548 5549 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands, 5550 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION); 5551 5552 /* 5553 * Here we already passed thru trace__parse_events_option() and it has 5554 * already figured out if -e syscall_name, if not but if --event 5555 * foo:bar was used, the user is interested _just_ in those, say, 5556 * tracepoint events, not in the strace-like syscall-name-based mode. 5557 * 5558 * This is important because we need to check if strace-like mode is 5559 * needed to decided if we should filter out the eBPF 5560 * __augmented_syscalls__ code, if it is in the mix, say, via 5561 * .perfconfig trace.add_events, and filter those out. 5562 */ 5563 if (!trace.trace_syscalls && !trace.trace_pgfaults && 5564 trace.evlist->core.nr_entries == 0 /* Was --events used? 

	/*
	 * Here we already passed through trace__parse_events_option() and it
	 * has already figured out if -e syscall_name was used. If not, but if
	 * --event foo:bar was used, the user is interested _just_ in those,
	 * say, tracepoint events, not in the strace-like syscall-name-based
	 * mode.
	 *
	 * This is important because we need to know if strace-like mode is
	 * needed to decide if we should filter out the eBPF
	 * __augmented_syscalls__ code, if it is in the mix, say, via
	 * .perfconfig trace.add_events.
	 */
	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
	    trace.evlist->core.nr_entries == 0 /* Was --event used? */) {
		trace.trace_syscalls = true;
	}
	/*
	 * Now that we have --verbose figured out, let's see if we need to
	 * parse events from .perfconfig, so that if those events fail parsing,
	 * say some BPF program fails, then we'll be able to use --verbose to
	 * see what went wrong in more detail.
	 */
	if (trace.perfconfig_events != NULL) {
		struct parse_events_error parse_err;

		parse_events_error__init(&parse_err);
		err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
		if (err)
			parse_events_error__print(&parse_err, trace.perfconfig_events);
		parse_events_error__exit(&parse_err);
		if (err)
			goto out;
	}

	if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
		usage_with_options_msg(trace_usage, trace_options,
				       "cgroup monitoring only available in system-wide mode");
	}

#ifdef HAVE_BPF_SKEL
	if (!trace.trace_syscalls)
		goto skip_augmentation;

	if ((argc >= 1) && (strcmp(argv[0], "record") == 0)) {
		pr_debug("Syscall augmentation fails with record, disabling augmentation\n");
		goto skip_augmentation;
	}

	if (trace.summary_bpf) {
		if (!trace.opts.target.system_wide) {
			/* TODO: Add filters in the BPF to support other targets. */
			pr_err("Error: --bpf-summary only works for system-wide mode.\n");
			goto out;
		}
		if (trace.summary_only)
			goto skip_augmentation;
	}

	trace.skel = augmented_raw_syscalls_bpf__open();
	if (!trace.skel) {
		pr_debug("Failed to open augmented syscalls BPF skeleton\n");
	} else {
		/*
		 * Disable attaching the BPF programs except for sys_enter and
		 * sys_exit that tail call into this as necessary.
		 */
		struct bpf_program *prog;

		bpf_object__for_each_program(prog, trace.skel->obj) {
			if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit)
				bpf_program__set_autoattach(prog, /*autoattach=*/false);
		}

		err = augmented_raw_syscalls_bpf__load(trace.skel);

		if (err < 0) {
			libbpf_strerror(err, bf, sizeof(bf));
			pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf);
		} else {
			augmented_raw_syscalls_bpf__attach(trace.skel);
			trace__add_syscall_newtp(&trace);
		}
	}

	err = bpf__setup_bpf_output(trace.evlist);
	if (err) {
		libbpf_strerror(err, bf, sizeof(bf));
		pr_err("ERROR: Setup BPF output event failed: %s\n", bf);
		goto out;
	}
	trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
	assert(evsel__name_is(trace.syscalls.events.bpf_output, "__augmented_syscalls__"));
skip_augmentation:
#endif
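	/*
	 * Whether or not syscall augmentation was set up above, the rest of
	 * the setup is shared; reset err so that the error paths below report
	 * their own failures.
	 */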
	err = -1;

	if (trace.trace_pgfaults) {
		trace.opts.sample_address = true;
		trace.opts.sample_time = true;
	}

	if (trace.opts.mmap_pages == UINT_MAX)
		mmap_pages_user_set = false;

	if (trace.max_stack == UINT_MAX) {
		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
		max_stack_user_set = false;
	}

#ifdef HAVE_DWARF_UNWIND_SUPPORT
	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
	}
#endif

	if (callchain_param.enabled) {
		if (!mmap_pages_user_set && geteuid() == 0)
			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;

		symbol_conf.use_callchain = true;
	}

	if (trace.evlist->core.nr_entries > 0) {
		bool use_btf = false;

		evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
		if (evlist__set_syscall_tp_fields(trace.evlist, &use_btf)) {
			perror("failed to set syscalls:* tracepoint fields");
			goto out;
		}

		if (use_btf)
			trace__load_vmlinux_btf(&trace);
	}

	/*
	 * If we are augmenting syscalls, then combine what we put in the
	 * __augmented_syscalls__ BPF map with what is in the
	 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
	 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
	 *
	 * We'll switch to looking at two BPF maps, one for sys_enter and the
	 * other for sys_exit, when we start augmenting the sys_exit paths with
	 * buffers that are being copied from kernel to userspace, think 'read'
	 * syscall.
	 */
	if (trace.syscalls.events.bpf_output) {
		evlist__for_each_entry(trace.evlist, evsel) {
			bool raw_syscalls_sys_exit = evsel__name_is(evsel, "raw_syscalls:sys_exit");

			if (raw_syscalls_sys_exit) {
				trace.raw_augmented_syscalls = true;
				goto init_augmented_syscall_tp;
			}

			if (trace.syscalls.events.bpf_output->priv == NULL &&
			    strstr(evsel__name(evsel), "syscalls:sys_enter")) {
				struct evsel *augmented = trace.syscalls.events.bpf_output;

				if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
				    evsel__init_augmented_syscall_tp_args(augmented))
					goto out;
				/*
				 * Augmented is the __augmented_syscalls__ BPF_OUTPUT event.
				 * Above we made sure we can get from the payload the tp
				 * fields that we get from the syscalls:sys_enter tracefs
				 * format file.
				 */
				augmented->handler = trace__sys_enter;
				/*
				 * Now we do the same for the *syscalls:sys_enter event so that
				 * if we handle it directly, i.e. if the BPF prog returns 0 so
				 * as not to filter it, then we'll handle it just like we would
				 * for the BPF_OUTPUT one:
				 */
				if (evsel__init_augmented_syscall_tp(evsel, evsel) ||
				    evsel__init_augmented_syscall_tp_args(evsel))
					goto out;
				evsel->handler = trace__sys_enter;
			}

			if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) {
				struct syscall_tp *sc;
init_augmented_syscall_tp:
				if (evsel__init_augmented_syscall_tp(evsel, evsel))
					goto out;
				sc = __evsel__syscall_tp(evsel);
				/*
				 * For now with BPF raw_augmented we hook into
				 * raw_syscalls:sys_enter and there we get all
				 * 6 syscall args plus the tracepoint common
				 * fields and the syscall_nr (another long).
				 * So we check if that is the case and if so
				 * don't look at sc->args_size but always at
				 * the full raw_syscalls:sys_enter payload,
				 * which is fixed.
				 *
				 * We'll revisit this later to pass
				 * sc->args_size to the BPF augmenter (now
				 * tools/perf/examples/bpf/augmented_raw_syscalls.c),
				 * so that it copies only what we need for each
				 * syscall, like what happens when we use
				 * syscalls:sys_enter_NAME, so that we reduce
				 * the kernel/userspace traffic to just what is
				 * needed for each syscall.
				 */
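				/*
				 * Illustrative arithmetic, assuming 64-bit
				 * longs: 6 args + 1 syscall_nr = 7 longs =
				 * 56 bytes of payload, on top of the
				 * sc->id.offset bytes of common tracepoint
				 * fields preceding the syscall id.
				 */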
				if (trace.raw_augmented_syscalls)
					trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
				evsel__init_augmented_syscall_tp_ret(evsel);
				evsel->handler = trace__sys_exit;
			}
		}
	}

	if ((argc >= 1) && (strcmp(argv[0], "record") == 0)) {
		err = trace__record(&trace, argc-1, &argv[1]);
		goto out;
	}

	/* Using just --errno-summary will trigger --summary */
	if (trace.errno_summary && !trace.summary && !trace.summary_only)
		trace.summary_only = true;

	/* summary_only implies summary option, but don't overwrite summary if set */
	if (trace.summary_only)
		trace.summary = trace.summary_only;

	/* Keep exited threads, otherwise information might be lost for summary */
	if (trace.summary) {
		symbol_conf.keep_exited_threads = true;
		if (trace.summary_mode == SUMMARY__NONE)
			trace.summary_mode = SUMMARY__BY_THREAD;

		if (!trace.summary_bpf && trace.summary_mode == SUMMARY__BY_CGROUP) {
			pr_err("Error: --summary-mode=cgroup only works with --bpf-summary\n");
			err = -EINVAL;
			goto out;
		}
	}

	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
		if (err < 0) {
			perror("failed to create output file");
			goto out;
		}
	}

	err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
	if (err)
		goto out_close;

	err = target__validate(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	if (trace.uid_str) {
		uid_t uid = parse_uid(trace.uid_str);

		if (uid == UINT_MAX) {
			ui__error("Invalid User: %s", trace.uid_str);
			err = -EINVAL;
			goto out_close;
		}
		err = parse_uid_filter(trace.evlist, uid);
		if (err)
			goto out_close;

		trace.opts.target.system_wide = true;
	}

	if (!argc && target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;

	if (input_name)
		err = trace__replay(&trace);
	else
		err = trace__run(&trace, argc, argv);

out_close:
	if (output_name != NULL)
		fclose(trace.output);
out:
	trace__exit(&trace);
#ifdef HAVE_BPF_SKEL
	augmented_raw_syscalls_bpf__destroy(trace.skel);
#endif
	return err;
}