/*
 * builtin-trace.c
 *
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system wide, etc.  Default format is loosely strace like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 */

#include "util/record.h"
#include <api/fs/tracing_path.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#ifdef HAVE_BPF_SKEL
#include "bpf_skel/augmented_raw_syscalls.skel.h"
#endif
#endif
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/config.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/env.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/synthetic-events.h"
#include "util/evlist.h"
#include "util/evswitch.h"
#include "util/hashmap.h"
#include "util/mmap.h"
#include <subcmd/pager.h>
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/tool.h"
#include "util/trace.h"
#include "util/util.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/tracepoint.h"
#include "callchain.h"
#include "print_binary.h"
#include "string2.h"
#include "syscalltbl.h"
#include "../perf.h"
#include "trace_augment.h"
#include "dwarf-regs.h"

#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/list_sort.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <fcntl.h>
#include <sys/sysmacros.h>

#include <linux/ctype.h>
#include <perf/mmap.h>
#include <tools/libc_compat.h>

#ifdef HAVE_LIBTRACEEVENT
#include <event-parse.h>
#endif

#ifndef O_CLOEXEC
# define O_CLOEXEC		02000000
#endif

#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE	1024
#endif

#define RAW_SYSCALL_ARGS_NUM	6

/*
 * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
 *
 * We have to explicitly mark the direction of the flow of data, i.e. whether it
 * goes from the kernel to user space or the other way around. Since the BPF
 * collector we have so far copies only from user to kernel space, mark the
 * arguments that go in that direction, so that we don't end up collecting the
 * previous contents of syscall args that go from kernel to user space.
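 *
 * For example, the buffer passed to write() flows from user space to the
 * kernel, so its argument is marked with .from_user = true (see the "write"
 * entry in syscall_fmts below), while a buffer that the kernel fills, such as
 * read()'s 'buf', is left unmarked so that stale user space contents are not
 * collected at sys_enter time.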
 */
struct syscall_arg_fmt {
	size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	bool (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
	void *parm;
	const char *name;
	u16 nr_entries; // for arrays
	bool from_user;
	bool show_zero;
#ifdef HAVE_LIBBPF_SUPPORT
	const struct btf_type *type;
	int type_id; /* used in btf_dump */
#endif
};

struct syscall_fmt {
	const char *name;
	const char *alias;
	struct {
		const char *sys_enter,
			   *sys_exit;
	} bpf_prog_name;
	struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
	u8 nr_args;
	bool errpid;
	bool timeout;
	bool hexret;
};

struct trace {
	struct perf_tool tool;
	struct {
		/** Sorted syscall numbers used by the trace. */
		struct syscall **table;
		/** Size of table. */
		size_t table_size;
		struct {
			struct evsel *sys_enter,
				     *sys_exit,
				     *bpf_output;
		} events;
	} syscalls;
#ifdef HAVE_BPF_SKEL
	struct augmented_raw_syscalls_bpf *skel;
#endif
#ifdef HAVE_LIBBPF_SUPPORT
	struct btf *btf;
#endif
	struct record_opts opts;
	struct evlist *evlist;
	struct machine *host;
	struct thread *current;
	struct cgroup *cgroup;
	u64 base_time;
	FILE *output;
	unsigned long nr_events;
	unsigned long nr_events_printed;
	unsigned long max_events;
	struct evswitch evswitch;
	struct strlist *ev_qualifier;
	struct {
		size_t nr;
		int *entries;
	} ev_qualifier_ids;
	struct {
		size_t nr;
		pid_t *entries;
		struct bpf_map *map;
	} filter_pids;
	/*
	 * TODO: The map is from an ID (aka system call number) to struct
	 * syscall_stats. If there is >1 e_machine, such as i386 and x86-64
	 * processes, then the stats here will gather the wrong statistics for
	 * the non-EM_HOST system calls. A fix would be to add the e_machine
	 * into the key, but this would make the code inconsistent with the
	 * per-thread version.
	 */
	struct hashmap *syscall_stats;
	double duration_filter;
	double runtime_ms;
	unsigned long pfmaj, pfmin;
	struct {
		u64 vfs_getname,
		    proc_getname;
	} stats;
	unsigned int max_stack;
	unsigned int min_stack;
	enum trace_summary_mode summary_mode;
	int raw_augmented_syscalls_args_size;
	bool raw_augmented_syscalls;
	bool fd_path_disabled;
	bool sort_events;
	bool not_ev_qualifier;
	bool live;
	bool full_time;
	bool sched;
	bool multiple_threads;
	bool summary;
	bool summary_only;
	bool errno_summary;
	bool failure_only;
	bool show_comm;
	bool print_sample;
	bool show_tool_stats;
	bool trace_syscalls;
	bool libtraceevent_print;
	bool kernel_syscallchains;
	s16 args_alignment;
	bool show_tstamp;
	bool show_duration;
	bool show_zeros;
	bool show_arg_names;
	bool show_string_prefix;
	bool force;
	bool vfs_getname;
	bool force_btf;
	bool summary_bpf;
	int trace_pgfaults;
	char *perfconfig_events;
	struct {
		struct ordered_events data;
		u64 last;
	} oe;
};

static void trace__load_vmlinux_btf(struct trace *trace __maybe_unused)
{
#ifdef HAVE_LIBBPF_SUPPORT
	if (trace->btf != NULL)
		return;

	trace->btf = btf__load_vmlinux_btf();
	if (verbose > 0) {
		fprintf(trace->output, trace->btf ? "vmlinux BTF loaded\n" :
						    "Failed to load vmlinux BTF\n");
	}
#endif
}
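
/*
 * Accessor for one tracepoint field in a raw sample: the field's offset in the
 * raw data plus a callback that reads it either as a (possibly byte-swapped)
 * unsigned integer or as a pointer into the raw data, see the TP_UINT_FIELD*()
 * helpers and tp_field__ptr() below.
 */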
"vmlinux BTF loaded\n" : 250 "Failed to load vmlinux BTF\n"); 251 } 252 #endif 253 } 254 255 struct tp_field { 256 int offset; 257 union { 258 u64 (*integer)(struct tp_field *field, struct perf_sample *sample); 259 void *(*pointer)(struct tp_field *field, struct perf_sample *sample); 260 }; 261 }; 262 263 #define TP_UINT_FIELD(bits) \ 264 static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \ 265 { \ 266 u##bits value; \ 267 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \ 268 return value; \ 269 } 270 271 TP_UINT_FIELD(8); 272 TP_UINT_FIELD(16); 273 TP_UINT_FIELD(32); 274 TP_UINT_FIELD(64); 275 276 #define TP_UINT_FIELD__SWAPPED(bits) \ 277 static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \ 278 { \ 279 u##bits value; \ 280 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \ 281 return bswap_##bits(value);\ 282 } 283 284 TP_UINT_FIELD__SWAPPED(16); 285 TP_UINT_FIELD__SWAPPED(32); 286 TP_UINT_FIELD__SWAPPED(64); 287 288 static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap) 289 { 290 field->offset = offset; 291 292 switch (size) { 293 case 1: 294 field->integer = tp_field__u8; 295 break; 296 case 2: 297 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16; 298 break; 299 case 4: 300 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32; 301 break; 302 case 8: 303 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64; 304 break; 305 default: 306 return -1; 307 } 308 309 return 0; 310 } 311 312 static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap) 313 { 314 return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap); 315 } 316 317 static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample) 318 { 319 return sample->raw_data + field->offset; 320 } 321 322 static int __tp_field__init_ptr(struct tp_field *field, int offset) 323 { 324 field->offset = offset; 325 field->pointer = tp_field__ptr; 326 return 0; 327 } 328 329 static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field) 330 { 331 return __tp_field__init_ptr(field, format_field->offset); 332 } 333 334 struct syscall_tp { 335 struct tp_field id; 336 union { 337 struct tp_field args, ret; 338 }; 339 }; 340 341 /* 342 * The evsel->priv as used by 'perf trace' 343 * sc: for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME 344 * fmt: for all the other tracepoints 345 */ 346 struct evsel_trace { 347 struct syscall_tp sc; 348 struct syscall_arg_fmt *fmt; 349 }; 350 351 static struct evsel_trace *evsel_trace__new(void) 352 { 353 return zalloc(sizeof(struct evsel_trace)); 354 } 355 356 static void evsel_trace__delete(struct evsel_trace *et) 357 { 358 if (et == NULL) 359 return; 360 361 zfree(&et->fmt); 362 free(et); 363 } 364 365 /* 366 * Used with raw_syscalls:sys_{enter,exit} and with the 367 * syscalls:sys_{enter,exit}_SYSCALL tracepoints 368 */ 369 static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel) 370 { 371 struct evsel_trace *et = evsel->priv; 372 373 return &et->sc; 374 } 375 376 static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel) 377 { 378 if (evsel->priv == NULL) { 379 evsel->priv = evsel_trace__new(); 380 if (evsel->priv == NULL) 381 return NULL; 382 } 383 384 return __evsel__syscall_tp(evsel); 385 } 386 387 /* 388 * Used with all the other tracepoints. 
389 */ 390 static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel) 391 { 392 struct evsel_trace *et = evsel->priv; 393 394 return et->fmt; 395 } 396 397 static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel) 398 { 399 struct evsel_trace *et = evsel->priv; 400 401 if (evsel->priv == NULL) { 402 et = evsel->priv = evsel_trace__new(); 403 404 if (et == NULL) 405 return NULL; 406 } 407 408 if (et->fmt == NULL) { 409 const struct tep_event *tp_format = evsel__tp_format(evsel); 410 411 if (tp_format == NULL) 412 goto out_delete; 413 414 et->fmt = calloc(tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt)); 415 if (et->fmt == NULL) 416 goto out_delete; 417 } 418 419 return __evsel__syscall_arg_fmt(evsel); 420 421 out_delete: 422 evsel_trace__delete(evsel->priv); 423 evsel->priv = NULL; 424 return NULL; 425 } 426 427 static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name) 428 { 429 struct tep_format_field *format_field = evsel__field(evsel, name); 430 431 if (format_field == NULL) 432 return -1; 433 434 return tp_field__init_uint(field, format_field, evsel->needs_swap); 435 } 436 437 #define perf_evsel__init_sc_tp_uint_field(evsel, name) \ 438 ({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\ 439 evsel__init_tp_uint_field(evsel, &sc->name, #name); }) 440 441 static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name) 442 { 443 struct tep_format_field *format_field = evsel__field(evsel, name); 444 445 if (format_field == NULL) 446 return -1; 447 448 return tp_field__init_ptr(field, format_field); 449 } 450 451 #define perf_evsel__init_sc_tp_ptr_field(evsel, name) \ 452 ({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\ 453 evsel__init_tp_ptr_field(evsel, &sc->name, #name); }) 454 455 static void evsel__delete_priv(struct evsel *evsel) 456 { 457 zfree(&evsel->priv); 458 evsel__delete(evsel); 459 } 460 461 static int evsel__init_syscall_tp(struct evsel *evsel) 462 { 463 struct syscall_tp *sc = evsel__syscall_tp(evsel); 464 465 if (sc != NULL) { 466 if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") && 467 evsel__init_tp_uint_field(evsel, &sc->id, "nr")) 468 return -ENOENT; 469 470 return 0; 471 } 472 473 return -ENOMEM; 474 } 475 476 static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp) 477 { 478 struct syscall_tp *sc = evsel__syscall_tp(evsel); 479 480 if (sc != NULL) { 481 struct tep_format_field *syscall_id = evsel__field(tp, "id"); 482 if (syscall_id == NULL) 483 syscall_id = evsel__field(tp, "__syscall_nr"); 484 if (syscall_id == NULL || 485 __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap)) 486 return -EINVAL; 487 488 return 0; 489 } 490 491 return -ENOMEM; 492 } 493 494 static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel) 495 { 496 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 497 498 return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)); 499 } 500 501 static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel) 502 { 503 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 504 505 return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap); 506 } 507 508 static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler) 509 { 510 if (evsel__syscall_tp(evsel) != NULL) { 511 if (perf_evsel__init_sc_tp_uint_field(evsel, id)) 512 return -ENOENT; 513 514 evsel->handler = handler; 515 
		return 0;
	}

	return -ENOMEM;
}

static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
	struct evsel *evsel = evsel__newtp("raw_syscalls", direction);

	/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (evsel__init_raw_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	evsel__delete_priv(evsel);
	return NULL;
}

#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.pointer(&fields->name, sample); })

size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_suffix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
}

size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_prefix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", show_prefix ?
sa->prefix : "", sa->entries[idx]); 576 } 577 578 static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size, 579 const char *intfmt, 580 struct syscall_arg *arg) 581 { 582 return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val); 583 } 584 585 static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size, 586 struct syscall_arg *arg) 587 { 588 return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg); 589 } 590 591 #define SCA_STRARRAY syscall_arg__scnprintf_strarray 592 593 bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret) 594 { 595 return strarray__strtoul(arg->parm, bf, size, ret); 596 } 597 598 bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret) 599 { 600 return strarray__strtoul_flags(arg->parm, bf, size, ret); 601 } 602 603 bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret) 604 { 605 return strarrays__strtoul(arg->parm, bf, size, ret); 606 } 607 608 size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg) 609 { 610 return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val); 611 } 612 613 size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val) 614 { 615 size_t printed; 616 int i; 617 618 for (i = 0; i < sas->nr_entries; ++i) { 619 struct strarray *sa = sas->entries[i]; 620 int idx = val - sa->offset; 621 622 if (idx >= 0 && idx < sa->nr_entries) { 623 if (sa->entries[idx] == NULL) 624 break; 625 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]); 626 } 627 } 628 629 printed = scnprintf(bf, size, intfmt, val); 630 if (show_prefix) 631 printed += scnprintf(bf + printed, size - printed, " /* %s??? 
*/", sas->entries[0]->prefix); 632 return printed; 633 } 634 635 bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret) 636 { 637 int i; 638 639 for (i = 0; i < sa->nr_entries; ++i) { 640 if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') { 641 *ret = sa->offset + i; 642 return true; 643 } 644 } 645 646 return false; 647 } 648 649 bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret) 650 { 651 u64 val = 0; 652 char *tok = bf, *sep, *end; 653 654 *ret = 0; 655 656 while (size != 0) { 657 int toklen = size; 658 659 sep = memchr(tok, '|', size); 660 if (sep != NULL) { 661 size -= sep - tok + 1; 662 663 end = sep - 1; 664 while (end > tok && isspace(*end)) 665 --end; 666 667 toklen = end - tok + 1; 668 } 669 670 while (isspace(*tok)) 671 ++tok; 672 673 if (isalpha(*tok) || *tok == '_') { 674 if (!strarray__strtoul(sa, tok, toklen, &val)) 675 return false; 676 } else 677 val = strtoul(tok, NULL, 0); 678 679 *ret |= (1 << (val - 1)); 680 681 if (sep == NULL) 682 break; 683 tok = sep + 1; 684 } 685 686 return true; 687 } 688 689 bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret) 690 { 691 int i; 692 693 for (i = 0; i < sas->nr_entries; ++i) { 694 struct strarray *sa = sas->entries[i]; 695 696 if (strarray__strtoul(sa, bf, size, ret)) 697 return true; 698 } 699 700 return false; 701 } 702 703 size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size, 704 struct syscall_arg *arg) 705 { 706 return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val); 707 } 708 709 #ifndef AT_FDCWD 710 #define AT_FDCWD -100 711 #endif 712 713 static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size, 714 struct syscall_arg *arg) 715 { 716 int fd = arg->val; 717 const char *prefix = "AT_FD"; 718 719 if (fd == AT_FDCWD) 720 return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD"); 721 722 return syscall_arg__scnprintf_fd(bf, size, arg); 723 } 724 725 #define SCA_FDAT syscall_arg__scnprintf_fd_at 726 727 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size, 728 struct syscall_arg *arg); 729 730 #define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd 731 732 size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg) 733 { 734 return scnprintf(bf, size, "%#lx", arg->val); 735 } 736 737 size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg) 738 { 739 if (arg->val == 0) 740 return scnprintf(bf, size, "NULL"); 741 return syscall_arg__scnprintf_hex(bf, size, arg); 742 } 743 744 size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg) 745 { 746 return scnprintf(bf, size, "%d", arg->val); 747 } 748 749 size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg) 750 { 751 return scnprintf(bf, size, "%ld", arg->val); 752 } 753 754 static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg) 755 { 756 // XXX Hey, maybe for sched:sched_switch prev/next comm fields we can 757 // fill missing comms using thread__set_comm()... 758 // here or in a special syscall_arg__scnprintf_pid_sched_tp... 
759 return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val); 760 } 761 762 #define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array 763 764 static const char *bpf_cmd[] = { 765 "MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM", 766 "MAP_GET_NEXT_KEY", "PROG_LOAD", "OBJ_PIN", "OBJ_GET", "PROG_ATTACH", 767 "PROG_DETACH", "PROG_TEST_RUN", "PROG_GET_NEXT_ID", "MAP_GET_NEXT_ID", 768 "PROG_GET_FD_BY_ID", "MAP_GET_FD_BY_ID", "OBJ_GET_INFO_BY_FD", 769 "PROG_QUERY", "RAW_TRACEPOINT_OPEN", "BTF_LOAD", "BTF_GET_FD_BY_ID", 770 "TASK_FD_QUERY", "MAP_LOOKUP_AND_DELETE_ELEM", "MAP_FREEZE", 771 "BTF_GET_NEXT_ID", "MAP_LOOKUP_BATCH", "MAP_LOOKUP_AND_DELETE_BATCH", 772 "MAP_UPDATE_BATCH", "MAP_DELETE_BATCH", "LINK_CREATE", "LINK_UPDATE", 773 "LINK_GET_FD_BY_ID", "LINK_GET_NEXT_ID", "ENABLE_STATS", "ITER_CREATE", 774 "LINK_DETACH", "PROG_BIND_MAP", 775 }; 776 static DEFINE_STRARRAY(bpf_cmd, "BPF_"); 777 778 static const char *fsmount_flags[] = { 779 [1] = "CLOEXEC", 780 }; 781 static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_"); 782 783 #include "trace/beauty/generated/fsconfig_arrays.c" 784 785 static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_"); 786 787 static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", }; 788 static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1); 789 790 static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", }; 791 static DEFINE_STRARRAY(itimers, "ITIMER_"); 792 793 static const char *keyctl_options[] = { 794 "GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN", 795 "SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ", 796 "INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT", 797 "ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT", 798 "INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT", 799 }; 800 static DEFINE_STRARRAY(keyctl_options, "KEYCTL_"); 801 802 static const char *whences[] = { "SET", "CUR", "END", 803 #ifdef SEEK_DATA 804 "DATA", 805 #endif 806 #ifdef SEEK_HOLE 807 "HOLE", 808 #endif 809 }; 810 static DEFINE_STRARRAY(whences, "SEEK_"); 811 812 static const char *fcntl_cmds[] = { 813 "DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK", 814 "SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64", 815 "SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX", 816 "GETOWNER_UIDS", 817 }; 818 static DEFINE_STRARRAY(fcntl_cmds, "F_"); 819 820 static const char *fcntl_linux_specific_cmds[] = { 821 "SETLEASE", "GETLEASE", "NOTIFY", "DUPFD_QUERY", [5] = "CANCELLK", "DUPFD_CLOEXEC", 822 "SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS", 823 "GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT", 824 }; 825 826 static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE); 827 828 static struct strarray *fcntl_cmds_arrays[] = { 829 &strarray__fcntl_cmds, 830 &strarray__fcntl_linux_specific_cmds, 831 }; 832 833 static DEFINE_STRARRAYS(fcntl_cmds_arrays); 834 835 static const char *rlimit_resources[] = { 836 "CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE", 837 "MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO", 838 "RTTIME", 839 }; 840 static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_"); 841 842 static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", }; 843 static DEFINE_STRARRAY(sighow, "SIG_"); 844 845 static const char *clockid[] = { 846 "REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID", 847 "MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME", 848 
"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI" 849 }; 850 static DEFINE_STRARRAY(clockid, "CLOCK_"); 851 852 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size, 853 struct syscall_arg *arg) 854 { 855 bool show_prefix = arg->show_string_prefix; 856 const char *suffix = "_OK"; 857 size_t printed = 0; 858 int mode = arg->val; 859 860 if (mode == F_OK) /* 0 */ 861 return scnprintf(bf, size, "F%s", show_prefix ? suffix : ""); 862 #define P_MODE(n) \ 863 if (mode & n##_OK) { \ 864 printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \ 865 mode &= ~n##_OK; \ 866 } 867 868 P_MODE(R); 869 P_MODE(W); 870 P_MODE(X); 871 #undef P_MODE 872 873 if (mode) 874 printed += scnprintf(bf + printed, size - printed, "|%#x", mode); 875 876 return printed; 877 } 878 879 #define SCA_ACCMODE syscall_arg__scnprintf_access_mode 880 881 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size, 882 struct syscall_arg *arg); 883 884 #define SCA_FILENAME syscall_arg__scnprintf_filename 885 886 // 'argname' is just documentational at this point, to remove the previous comment with that info 887 #define SCA_FILENAME_FROM_USER(argname) \ 888 { .scnprintf = SCA_FILENAME, \ 889 .from_user = true, } 890 891 static size_t syscall_arg__scnprintf_buf(char *bf, size_t size, struct syscall_arg *arg); 892 893 #define SCA_BUF syscall_arg__scnprintf_buf 894 895 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size, 896 struct syscall_arg *arg) 897 { 898 bool show_prefix = arg->show_string_prefix; 899 const char *prefix = "O_"; 900 int printed = 0, flags = arg->val; 901 902 #define P_FLAG(n) \ 903 if (flags & O_##n) { \ 904 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ 905 flags &= ~O_##n; \ 906 } 907 908 P_FLAG(CLOEXEC); 909 P_FLAG(NONBLOCK); 910 #undef P_FLAG 911 912 if (flags) 913 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); 914 915 return printed; 916 } 917 918 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags 919 920 #ifndef GRND_NONBLOCK 921 #define GRND_NONBLOCK 0x0001 922 #endif 923 #ifndef GRND_RANDOM 924 #define GRND_RANDOM 0x0002 925 #endif 926 927 static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size, 928 struct syscall_arg *arg) 929 { 930 bool show_prefix = arg->show_string_prefix; 931 const char *prefix = "GRND_"; 932 int printed = 0, flags = arg->val; 933 934 #define P_FLAG(n) \ 935 if (flags & GRND_##n) { \ 936 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ 937 flags &= ~GRND_##n; \ 938 } 939 940 P_FLAG(RANDOM); 941 P_FLAG(NONBLOCK); 942 #undef P_FLAG 943 944 if (flags) 945 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? 
"|" : "", flags); 946 947 return printed; 948 } 949 950 #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags 951 952 #ifdef HAVE_LIBBPF_SUPPORT 953 static void syscall_arg_fmt__cache_btf_enum(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type) 954 { 955 int id; 956 957 type = strstr(type, "enum "); 958 if (type == NULL) 959 return; 960 961 type += 5; // skip "enum " to get the enumeration name 962 963 id = btf__find_by_name(btf, type); 964 if (id < 0) 965 return; 966 967 arg_fmt->type = btf__type_by_id(btf, id); 968 } 969 970 static bool syscall_arg__strtoul_btf_enum(char *bf, size_t size, struct syscall_arg *arg, u64 *val) 971 { 972 const struct btf_type *bt = arg->fmt->type; 973 struct btf *btf = arg->trace->btf; 974 struct btf_enum *be = btf_enum(bt); 975 976 for (int i = 0; i < btf_vlen(bt); ++i, ++be) { 977 const char *name = btf__name_by_offset(btf, be->name_off); 978 int max_len = max(size, strlen(name)); 979 980 if (strncmp(name, bf, max_len) == 0) { 981 *val = be->val; 982 return true; 983 } 984 } 985 986 return false; 987 } 988 989 static bool syscall_arg__strtoul_btf_type(char *bf, size_t size, struct syscall_arg *arg, u64 *val) 990 { 991 const struct btf_type *bt; 992 char *type = arg->type_name; 993 struct btf *btf; 994 995 trace__load_vmlinux_btf(arg->trace); 996 997 btf = arg->trace->btf; 998 if (btf == NULL) 999 return false; 1000 1001 if (arg->fmt->type == NULL) { 1002 // See if this is an enum 1003 syscall_arg_fmt__cache_btf_enum(arg->fmt, btf, type); 1004 } 1005 1006 // Now let's see if we have a BTF type resolved 1007 bt = arg->fmt->type; 1008 if (bt == NULL) 1009 return false; 1010 1011 // If it is an enum: 1012 if (btf_is_enum(arg->fmt->type)) 1013 return syscall_arg__strtoul_btf_enum(bf, size, arg, val); 1014 1015 return false; 1016 } 1017 1018 static size_t btf_enum_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, int val) 1019 { 1020 struct btf_enum *be = btf_enum(type); 1021 const int nr_entries = btf_vlen(type); 1022 1023 for (int i = 0; i < nr_entries; ++i, ++be) { 1024 if (be->val == val) { 1025 return scnprintf(bf, size, "%s", 1026 btf__name_by_offset(btf, be->name_off)); 1027 } 1028 } 1029 1030 return 0; 1031 } 1032 1033 struct trace_btf_dump_snprintf_ctx { 1034 char *bf; 1035 size_t printed, size; 1036 }; 1037 1038 static void trace__btf_dump_snprintf(void *vctx, const char *fmt, va_list args) 1039 { 1040 struct trace_btf_dump_snprintf_ctx *ctx = vctx; 1041 1042 ctx->printed += vscnprintf(ctx->bf + ctx->printed, ctx->size - ctx->printed, fmt, args); 1043 } 1044 1045 static size_t btf_struct_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, struct syscall_arg *arg) 1046 { 1047 struct trace_btf_dump_snprintf_ctx ctx = { 1048 .bf = bf, 1049 .size = size, 1050 }; 1051 struct augmented_arg *augmented_arg = arg->augmented.args; 1052 int type_id = arg->fmt->type_id, consumed; 1053 struct btf_dump *btf_dump; 1054 1055 LIBBPF_OPTS(btf_dump_opts, dump_opts); 1056 LIBBPF_OPTS(btf_dump_type_data_opts, dump_data_opts); 1057 1058 if (arg == NULL || arg->augmented.args == NULL) 1059 return 0; 1060 1061 dump_data_opts.compact = true; 1062 dump_data_opts.skip_names = !arg->trace->show_arg_names; 1063 1064 btf_dump = btf_dump__new(btf, trace__btf_dump_snprintf, &ctx, &dump_opts); 1065 if (btf_dump == NULL) 1066 return 0; 1067 1068 /* pretty print the struct data here */ 1069 if (btf_dump__dump_type_data(btf_dump, type_id, arg->augmented.args->value, type->size, &dump_data_opts) == 0) 1070 
return 0; 1071 1072 consumed = sizeof(*augmented_arg) + augmented_arg->size; 1073 arg->augmented.args = ((void *)arg->augmented.args) + consumed; 1074 arg->augmented.size -= consumed; 1075 1076 btf_dump__free(btf_dump); 1077 1078 return ctx.printed; 1079 } 1080 1081 static size_t trace__btf_scnprintf(struct trace *trace, struct syscall_arg *arg, char *bf, 1082 size_t size, int val, char *type) 1083 { 1084 struct syscall_arg_fmt *arg_fmt = arg->fmt; 1085 1086 if (trace->btf == NULL) 1087 return 0; 1088 1089 if (arg_fmt->type == NULL) { 1090 // Check if this is an enum and if we have the BTF type for it. 1091 syscall_arg_fmt__cache_btf_enum(arg_fmt, trace->btf, type); 1092 } 1093 1094 // Did we manage to find a BTF type for the syscall/tracepoint argument? 1095 if (arg_fmt->type == NULL) 1096 return 0; 1097 1098 if (btf_is_enum(arg_fmt->type)) 1099 return btf_enum_scnprintf(arg_fmt->type, trace->btf, bf, size, val); 1100 else if (btf_is_struct(arg_fmt->type) || btf_is_union(arg_fmt->type)) 1101 return btf_struct_scnprintf(arg_fmt->type, trace->btf, bf, size, arg); 1102 1103 return 0; 1104 } 1105 1106 #else // HAVE_LIBBPF_SUPPORT 1107 static size_t trace__btf_scnprintf(struct trace *trace __maybe_unused, struct syscall_arg *arg __maybe_unused, 1108 char *bf __maybe_unused, size_t size __maybe_unused, int val __maybe_unused, 1109 char *type __maybe_unused) 1110 { 1111 return 0; 1112 } 1113 1114 static bool syscall_arg__strtoul_btf_type(char *bf __maybe_unused, size_t size __maybe_unused, 1115 struct syscall_arg *arg __maybe_unused, u64 *val __maybe_unused) 1116 { 1117 return false; 1118 } 1119 #endif // HAVE_LIBBPF_SUPPORT 1120 1121 #define STUL_BTF_TYPE syscall_arg__strtoul_btf_type 1122 1123 #define STRARRAY(name, array) \ 1124 { .scnprintf = SCA_STRARRAY, \ 1125 .strtoul = STUL_STRARRAY, \ 1126 .parm = &strarray__##array, } 1127 1128 #define STRARRAY_FLAGS(name, array) \ 1129 { .scnprintf = SCA_STRARRAY_FLAGS, \ 1130 .strtoul = STUL_STRARRAY_FLAGS, \ 1131 .parm = &strarray__##array, } 1132 1133 #include "trace/beauty/eventfd.c" 1134 #include "trace/beauty/futex_op.c" 1135 #include "trace/beauty/futex_val3.c" 1136 #include "trace/beauty/mmap.c" 1137 #include "trace/beauty/mode_t.c" 1138 #include "trace/beauty/msg_flags.c" 1139 #include "trace/beauty/open_flags.c" 1140 #include "trace/beauty/perf_event_open.c" 1141 #include "trace/beauty/pid.c" 1142 #include "trace/beauty/sched_policy.c" 1143 #include "trace/beauty/seccomp.c" 1144 #include "trace/beauty/signum.c" 1145 #include "trace/beauty/socket_type.c" 1146 #include "trace/beauty/waitid_options.c" 1147 1148 static const struct syscall_fmt syscall_fmts[] = { 1149 { .name = "access", 1150 .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, }, 1151 { .name = "arch_prctl", 1152 .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ }, 1153 [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, }, 1154 { .name = "bind", 1155 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ }, 1156 [1] = SCA_SOCKADDR_FROM_USER(umyaddr), 1157 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, }, 1158 { .name = "bpf", 1159 .arg = { [0] = STRARRAY(cmd, bpf_cmd), 1160 [1] = { .from_user = true /* attr */, }, } }, 1161 { .name = "brk", .hexret = true, 1162 .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, }, 1163 { .name = "clock_gettime", 1164 .arg = { [0] = STRARRAY(clk_id, clockid), }, }, 1165 { .name = "clock_nanosleep", 1166 .arg = { [2] = SCA_TIMESPEC_FROM_USER(req), }, }, 1167 { .name = "clone", .errpid = true, .nr_args = 5, 1168 .arg = { [0] = { 
.name = "flags", .scnprintf = SCA_CLONE_FLAGS, }, 1169 [1] = { .name = "child_stack", .scnprintf = SCA_HEX, }, 1170 [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, }, 1171 [3] = { .name = "child_tidptr", .scnprintf = SCA_HEX, }, 1172 [4] = { .name = "tls", .scnprintf = SCA_HEX, }, }, }, 1173 { .name = "close", 1174 .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, }, 1175 { .name = "connect", 1176 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ }, 1177 [1] = SCA_SOCKADDR_FROM_USER(servaddr), 1178 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, }, 1179 { .name = "epoll_ctl", 1180 .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, }, 1181 { .name = "eventfd2", 1182 .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, }, 1183 { .name = "faccessat", 1184 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, 1185 [1] = SCA_FILENAME_FROM_USER(pathname), 1186 [2] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, }, 1187 { .name = "faccessat2", 1188 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, 1189 [1] = SCA_FILENAME_FROM_USER(pathname), 1190 [2] = { .scnprintf = SCA_ACCMODE, /* mode */ }, 1191 [3] = { .scnprintf = SCA_FACCESSAT2_FLAGS, /* flags */ }, }, }, 1192 { .name = "fchmodat", 1193 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 1194 { .name = "fchownat", 1195 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 1196 { .name = "fcntl", 1197 .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */ 1198 .strtoul = STUL_STRARRAYS, 1199 .parm = &strarrays__fcntl_cmds_arrays, 1200 .show_zero = true, }, 1201 [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, }, 1202 { .name = "flock", 1203 .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, }, 1204 { .name = "fsconfig", 1205 .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, }, 1206 { .name = "fsmount", 1207 .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags), 1208 [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, }, 1209 { .name = "fspick", 1210 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, 1211 [1] = SCA_FILENAME_FROM_USER(path), 1212 [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, }, 1213 { .name = "fstat", .alias = "newfstat", }, 1214 { .name = "futex", 1215 .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ }, 1216 [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, }, 1217 { .name = "futimesat", 1218 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 1219 { .name = "getitimer", 1220 .arg = { [0] = STRARRAY(which, itimers), }, }, 1221 { .name = "getpid", .errpid = true, }, 1222 { .name = "getpgid", .errpid = true, }, 1223 { .name = "getppid", .errpid = true, }, 1224 { .name = "getrandom", 1225 .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, }, 1226 { .name = "getrlimit", 1227 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, }, 1228 { .name = "getsockopt", 1229 .arg = { [1] = STRARRAY(level, socket_level), }, }, 1230 { .name = "gettid", .errpid = true, }, 1231 { .name = "ioctl", 1232 .arg = { 1233 #if defined(__i386__) || defined(__x86_64__) 1234 /* 1235 * FIXME: Make this available to all arches. 
1236 */ 1237 [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ }, 1238 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, }, 1239 #else 1240 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, }, 1241 #endif 1242 { .name = "kcmp", .nr_args = 5, 1243 .arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, }, 1244 [1] = { .name = "pid2", .scnprintf = SCA_PID, }, 1245 [2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, }, 1246 [3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, }, 1247 [4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, }, 1248 { .name = "keyctl", 1249 .arg = { [0] = STRARRAY(option, keyctl_options), }, }, 1250 { .name = "kill", 1251 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1252 { .name = "linkat", 1253 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 1254 { .name = "lseek", 1255 .arg = { [2] = STRARRAY(whence, whences), }, }, 1256 { .name = "lstat", .alias = "newlstat", }, 1257 { .name = "madvise", 1258 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ }, 1259 [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, }, 1260 { .name = "mkdirat", 1261 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 1262 { .name = "mknodat", 1263 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 1264 { .name = "mmap", .hexret = true, 1265 /* The standard mmap maps to old_mmap on s390x */ 1266 #if defined(__s390x__) 1267 .alias = "old_mmap", 1268 #endif 1269 .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ }, 1270 [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */ 1271 .strtoul = STUL_STRARRAY_FLAGS, 1272 .parm = &strarray__mmap_flags, }, 1273 [5] = { .scnprintf = SCA_HEX, /* offset */ }, }, }, 1274 { .name = "mount", 1275 .arg = { [0] = SCA_FILENAME_FROM_USER(devname), 1276 [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */ 1277 .mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, }, 1278 { .name = "move_mount", 1279 .arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ }, 1280 [1] = SCA_FILENAME_FROM_USER(pathname), 1281 [2] = { .scnprintf = SCA_FDAT, /* to_dfd */ }, 1282 [3] = SCA_FILENAME_FROM_USER(pathname), 1283 [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, }, 1284 { .name = "mprotect", 1285 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ }, 1286 [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ }, }, }, 1287 { .name = "mq_unlink", 1288 .arg = { [0] = SCA_FILENAME_FROM_USER(u_name), }, }, 1289 { .name = "mremap", .hexret = true, 1290 .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, }, 1291 { .name = "name_to_handle_at", 1292 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, 1293 { .name = "nanosleep", 1294 .arg = { [0] = SCA_TIMESPEC_FROM_USER(req), }, }, 1295 { .name = "newfstatat", .alias = "fstatat", 1296 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, 1297 [1] = SCA_FILENAME_FROM_USER(pathname), 1298 [3] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, }, 1299 { .name = "open", 1300 .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, }, 1301 { .name = "open_by_handle_at", 1302 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, 1303 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, }, 1304 { .name = "openat", 1305 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, 1306 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, }, 1307 { .name = "perf_event_open", 1308 .arg = { [0] = SCA_PERF_ATTR_FROM_USER(attr), 1309 [2] = { .scnprintf = SCA_INT, /* cpu */ }, 1310 [3] = { .scnprintf = SCA_FD, /* group_fd */ }, 1311 [4] = { .scnprintf = 
SCA_PERF_FLAGS, /* flags */ }, }, }, 1312 { .name = "pipe2", 1313 .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, }, 1314 { .name = "pkey_alloc", 1315 .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS, /* access_rights */ }, }, }, 1316 { .name = "pkey_free", 1317 .arg = { [0] = { .scnprintf = SCA_INT, /* key */ }, }, }, 1318 { .name = "pkey_mprotect", 1319 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ }, 1320 [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ }, 1321 [3] = { .scnprintf = SCA_INT, /* pkey */ }, }, }, 1322 { .name = "poll", .timeout = true, }, 1323 { .name = "ppoll", .timeout = true, }, 1324 { .name = "prctl", 1325 .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ 1326 .strtoul = STUL_STRARRAY, 1327 .parm = &strarray__prctl_options, }, 1328 [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ }, 1329 [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, }, 1330 { .name = "pread", .alias = "pread64", }, 1331 { .name = "preadv", .alias = "pread", }, 1332 { .name = "prlimit64", 1333 .arg = { [1] = STRARRAY(resource, rlimit_resources), 1334 [2] = { .from_user = true /* new_rlim */, }, }, }, 1335 { .name = "pwrite", .alias = "pwrite64", }, 1336 { .name = "readlinkat", 1337 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, 1338 { .name = "recvfrom", 1339 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1340 { .name = "recvmmsg", 1341 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1342 { .name = "recvmsg", 1343 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1344 { .name = "renameat", 1345 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ }, 1346 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, }, 1347 { .name = "renameat2", 1348 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ }, 1349 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, 1350 [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, }, 1351 { .name = "rseq", 1352 .arg = { [0] = { .from_user = true /* rseq */, }, }, }, 1353 { .name = "rt_sigaction", 1354 .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1355 { .name = "rt_sigprocmask", 1356 .arg = { [0] = STRARRAY(how, sighow), }, }, 1357 { .name = "rt_sigqueueinfo", 1358 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1359 { .name = "rt_tgsigqueueinfo", 1360 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1361 { .name = "sched_setscheduler", 1362 .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, }, 1363 { .name = "seccomp", 1364 .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP, /* op */ }, 1365 [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, }, 1366 { .name = "select", .timeout = true, }, 1367 { .name = "sendfile", .alias = "sendfile64", }, 1368 { .name = "sendmmsg", 1369 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1370 { .name = "sendmsg", 1371 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1372 { .name = "sendto", 1373 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, 1374 [4] = SCA_SOCKADDR_FROM_USER(addr), }, }, 1375 { .name = "set_robust_list", 1376 .arg = { [0] = { .from_user = true /* head */, }, }, }, 1377 { .name = "set_tid_address", .errpid = true, }, 1378 { .name = "setitimer", 1379 .arg = { [0] = STRARRAY(which, itimers), }, }, 1380 { .name = "setrlimit", 1381 .arg = { [0] = STRARRAY(resource, rlimit_resources), 1382 [1] = { .from_user = true /* rlim */, }, }, }, 1383 { .name = "setsockopt", 1384 .arg = { [1] = 
STRARRAY(level, socket_level), }, }, 1385 { .name = "socket", 1386 .arg = { [0] = STRARRAY(family, socket_families), 1387 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, 1388 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, }, 1389 { .name = "socketpair", 1390 .arg = { [0] = STRARRAY(family, socket_families), 1391 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, 1392 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, }, 1393 { .name = "stat", .alias = "newstat", }, 1394 { .name = "statx", 1395 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ }, 1396 [2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ } , 1397 [3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, }, 1398 { .name = "swapoff", 1399 .arg = { [0] = SCA_FILENAME_FROM_USER(specialfile), }, }, 1400 { .name = "swapon", 1401 .arg = { [0] = SCA_FILENAME_FROM_USER(specialfile), }, }, 1402 { .name = "symlinkat", 1403 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, 1404 { .name = "sync_file_range", 1405 .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, }, 1406 { .name = "tgkill", 1407 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1408 { .name = "tkill", 1409 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1410 { .name = "umount2", .alias = "umount", 1411 .arg = { [0] = SCA_FILENAME_FROM_USER(name), }, }, 1412 { .name = "uname", .alias = "newuname", }, 1413 { .name = "unlinkat", 1414 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, 1415 [1] = SCA_FILENAME_FROM_USER(pathname), 1416 [2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, }, 1417 { .name = "utimensat", 1418 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, }, 1419 { .name = "wait4", .errpid = true, 1420 .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, }, 1421 { .name = "waitid", .errpid = true, 1422 .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, }, 1423 { .name = "write", 1424 .arg = { [1] = { .scnprintf = SCA_BUF /* buf */, .from_user = true, }, }, }, 1425 }; 1426 1427 static int syscall_fmt__cmp(const void *name, const void *fmtp) 1428 { 1429 const struct syscall_fmt *fmt = fmtp; 1430 return strcmp(name, fmt->name); 1431 } 1432 1433 static const struct syscall_fmt *__syscall_fmt__find(const struct syscall_fmt *fmts, 1434 const int nmemb, 1435 const char *name) 1436 { 1437 return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp); 1438 } 1439 1440 static const struct syscall_fmt *syscall_fmt__find(const char *name) 1441 { 1442 const int nmemb = ARRAY_SIZE(syscall_fmts); 1443 return __syscall_fmt__find(syscall_fmts, nmemb, name); 1444 } 1445 1446 static const struct syscall_fmt *__syscall_fmt__find_by_alias(const struct syscall_fmt *fmts, 1447 const int nmemb, const char *alias) 1448 { 1449 int i; 1450 1451 for (i = 0; i < nmemb; ++i) { 1452 if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0) 1453 return &fmts[i]; 1454 } 1455 1456 return NULL; 1457 } 1458 1459 static const struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias) 1460 { 1461 const int nmemb = ARRAY_SIZE(syscall_fmts); 1462 return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias); 1463 } 1464 1465 /** 1466 * struct syscall 1467 */ 1468 struct syscall { 1469 /** @e_machine: The ELF machine associated with the entry. */ 1470 int e_machine; 1471 /** @id: id value from the tracepoint, the system call number. 
 */
	int id;
	struct tep_event *tp_format;
	int nr_args;
	/**
	 * @args_size: sum of the sizes of the syscall arguments, anything
	 * after that is augmented stuff: pathname for openat, etc.
	 */

	int args_size;
	struct {
		struct bpf_program *sys_enter,
				   *sys_exit;
	} bpf_prog;
	/** @is_exit: is this "exit" or "exit_group"? */
	bool is_exit;
	/**
	 * @is_open: is this "open" or "openat"? To associate the fd returned in
	 * sys_exit with the pathname in sys_enter.
	 */
	bool is_open;
	/**
	 * @nonexistent: Name lookup failed. Just a hole in the syscall table,
	 * syscall id not allocated.
	 */
	bool nonexistent;
	bool use_btf;
	struct tep_format_field *args;
	const char *name;
	const struct syscall_fmt *fmt;
	struct syscall_arg_fmt *arg_fmt;
};

/*
 * We need to have this 'calculated' boolean because in some cases we really
 * don't know what the duration of a syscall is, for instance, when we start a
 * session and some threads are waiting for a syscall to finish, say 'poll', in
 * which case all we can do is to print "( ? )" for the duration and for the
 * start timestamp.
 */
static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (!calculated)
		printed += fprintf(fp, " ");
	else if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}

/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 * filename.ptr by the vfs_getname tracepoint/kprobe.
 * ret_scnprintf: syscall args may set this to a different syscall return
 * formatter, for instance, fcntl may return fds, file flags, etc.
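 *
 * Illustrative example: for an openat(AT_FDCWD, "/etc/passwd", O_RDONLY) the
 * sys_enter event only carries the user space pointer; once the matching
 * vfs_getname probe fires, the resolved string is spliced into entry_str at
 * entry_str_pos (the actual path depends on the traced workload).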
1533 */ 1534 struct thread_trace { 1535 u64 entry_time; 1536 bool entry_pending; 1537 unsigned long nr_events; 1538 unsigned long pfmaj, pfmin; 1539 char *entry_str; 1540 double runtime_ms; 1541 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg); 1542 struct { 1543 unsigned long ptr; 1544 short int entry_str_pos; 1545 bool pending_open; 1546 unsigned int namelen; 1547 char *name; 1548 } filename; 1549 struct { 1550 int max; 1551 struct file *table; 1552 } files; 1553 1554 struct hashmap *syscall_stats; 1555 }; 1556 1557 static size_t syscall_id_hash(long key, void *ctx __maybe_unused) 1558 { 1559 return key; 1560 } 1561 1562 static bool syscall_id_equal(long key1, long key2, void *ctx __maybe_unused) 1563 { 1564 return key1 == key2; 1565 } 1566 1567 static struct hashmap *alloc_syscall_stats(void) 1568 { 1569 return hashmap__new(syscall_id_hash, syscall_id_equal, NULL); 1570 } 1571 1572 static void delete_syscall_stats(struct hashmap *syscall_stats) 1573 { 1574 struct hashmap_entry *pos; 1575 size_t bkt; 1576 1577 if (syscall_stats == NULL) 1578 return; 1579 1580 hashmap__for_each_entry(syscall_stats, pos, bkt) 1581 zfree(&pos->pvalue); 1582 hashmap__free(syscall_stats); 1583 } 1584 1585 static struct thread_trace *thread_trace__new(struct trace *trace) 1586 { 1587 struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace)); 1588 1589 if (ttrace) { 1590 ttrace->files.max = -1; 1591 if (trace->summary) { 1592 ttrace->syscall_stats = alloc_syscall_stats(); 1593 if (IS_ERR(ttrace->syscall_stats)) 1594 zfree(&ttrace); 1595 } 1596 } 1597 1598 return ttrace; 1599 } 1600 1601 static void thread_trace__free_files(struct thread_trace *ttrace); 1602 1603 static void thread_trace__delete(void *pttrace) 1604 { 1605 struct thread_trace *ttrace = pttrace; 1606 1607 if (!ttrace) 1608 return; 1609 1610 delete_syscall_stats(ttrace->syscall_stats); 1611 ttrace->syscall_stats = NULL; 1612 thread_trace__free_files(ttrace); 1613 zfree(&ttrace->entry_str); 1614 free(ttrace); 1615 } 1616 1617 static struct thread_trace *thread__trace(struct thread *thread, struct trace *trace) 1618 { 1619 struct thread_trace *ttrace; 1620 1621 if (thread == NULL) 1622 goto fail; 1623 1624 if (thread__priv(thread) == NULL) 1625 thread__set_priv(thread, thread_trace__new(trace)); 1626 1627 if (thread__priv(thread) == NULL) 1628 goto fail; 1629 1630 ttrace = thread__priv(thread); 1631 ++ttrace->nr_events; 1632 1633 return ttrace; 1634 fail: 1635 color_fprintf(trace->output, PERF_COLOR_RED, 1636 "WARNING: not enough memory, dropping samples!\n"); 1637 return NULL; 1638 } 1639 1640 1641 void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg, 1642 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg)) 1643 { 1644 struct thread_trace *ttrace = thread__priv(arg->thread); 1645 1646 ttrace->ret_scnprintf = ret_scnprintf; 1647 } 1648 1649 #define TRACE_PFMAJ (1 << 0) 1650 #define TRACE_PFMIN (1 << 1) 1651 1652 static const size_t trace__entry_str_size = 2048; 1653 1654 static void thread_trace__free_files(struct thread_trace *ttrace) 1655 { 1656 for (int i = 0; i <= ttrace->files.max; ++i) { 1657 struct file *file = ttrace->files.table + i; 1658 zfree(&file->pathname); 1659 } 1660 1661 zfree(&ttrace->files.table); 1662 ttrace->files.max = -1; 1663 } 1664 1665 static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd) 1666 { 1667 if (fd < 0) 1668 return NULL; 1669 1670 if (fd > ttrace->files.max) { 1671 struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * 
sizeof(struct file)); 1672 1673 if (nfiles == NULL) 1674 return NULL; 1675 1676 if (ttrace->files.max != -1) { 1677 memset(nfiles + ttrace->files.max + 1, 0, 1678 (fd - ttrace->files.max) * sizeof(struct file)); 1679 } else { 1680 memset(nfiles, 0, (fd + 1) * sizeof(struct file)); 1681 } 1682 1683 ttrace->files.table = nfiles; 1684 ttrace->files.max = fd; 1685 } 1686 1687 return ttrace->files.table + fd; 1688 } 1689 1690 struct file *thread__files_entry(struct thread *thread, int fd) 1691 { 1692 return thread_trace__files_entry(thread__priv(thread), fd); 1693 } 1694 1695 static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname) 1696 { 1697 struct thread_trace *ttrace = thread__priv(thread); 1698 struct file *file = thread_trace__files_entry(ttrace, fd); 1699 1700 if (file != NULL) { 1701 struct stat st; 1702 1703 if (stat(pathname, &st) == 0) 1704 file->dev_maj = major(st.st_rdev); 1705 file->pathname = strdup(pathname); 1706 if (file->pathname) 1707 return 0; 1708 } 1709 1710 return -1; 1711 } 1712 1713 static int thread__read_fd_path(struct thread *thread, int fd) 1714 { 1715 char linkname[PATH_MAX], pathname[PATH_MAX]; 1716 struct stat st; 1717 int ret; 1718 1719 if (thread__pid(thread) == thread__tid(thread)) { 1720 scnprintf(linkname, sizeof(linkname), 1721 "/proc/%d/fd/%d", thread__pid(thread), fd); 1722 } else { 1723 scnprintf(linkname, sizeof(linkname), 1724 "/proc/%d/task/%d/fd/%d", 1725 thread__pid(thread), thread__tid(thread), fd); 1726 } 1727 1728 if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname)) 1729 return -1; 1730 1731 ret = readlink(linkname, pathname, sizeof(pathname)); 1732 1733 if (ret < 0 || ret > st.st_size) 1734 return -1; 1735 1736 pathname[ret] = '\0'; 1737 return trace__set_fd_pathname(thread, fd, pathname); 1738 } 1739 1740 static const char *thread__fd_path(struct thread *thread, int fd, 1741 struct trace *trace) 1742 { 1743 struct thread_trace *ttrace = thread__priv(thread); 1744 1745 if (ttrace == NULL || trace->fd_path_disabled) 1746 return NULL; 1747 1748 if (fd < 0) 1749 return NULL; 1750 1751 if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) { 1752 if (!trace->live) 1753 return NULL; 1754 ++trace->stats.proc_getname; 1755 if (thread__read_fd_path(thread, fd)) 1756 return NULL; 1757 } 1758 1759 return ttrace->files.table[fd].pathname; 1760 } 1761 1762 size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg) 1763 { 1764 int fd = arg->val; 1765 size_t printed = scnprintf(bf, size, "%d", fd); 1766 const char *path = thread__fd_path(arg->thread, fd, arg->trace); 1767 1768 if (path) 1769 printed += scnprintf(bf + printed, size - printed, "<%s>", path); 1770 1771 return printed; 1772 } 1773 1774 size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size) 1775 { 1776 size_t printed = scnprintf(bf, size, "%d", fd); 1777 struct thread *thread = machine__find_thread(trace->host, pid, pid); 1778 1779 if (thread) { 1780 const char *path = thread__fd_path(thread, fd, trace); 1781 1782 if (path) 1783 printed += scnprintf(bf + printed, size - printed, "<%s>", path); 1784 1785 thread__put(thread); 1786 } 1787 1788 return printed; 1789 } 1790 1791 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size, 1792 struct syscall_arg *arg) 1793 { 1794 int fd = arg->val; 1795 size_t printed = syscall_arg__scnprintf_fd(bf, size, arg); 1796 struct thread_trace *ttrace = thread__priv(arg->thread); 1797 1798 if (ttrace && fd >= 0 && fd <= 
ttrace->files.max) 1799 zfree(&ttrace->files.table[fd].pathname); 1800 1801 return printed; 1802 } 1803 1804 static void thread__set_filename_pos(struct thread *thread, const char *bf, 1805 unsigned long ptr) 1806 { 1807 struct thread_trace *ttrace = thread__priv(thread); 1808 1809 ttrace->filename.ptr = ptr; 1810 ttrace->filename.entry_str_pos = bf - ttrace->entry_str; 1811 } 1812 1813 static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size) 1814 { 1815 struct augmented_arg *augmented_arg = arg->augmented.args; 1816 size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value); 1817 /* 1818 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls 1819 * we would have two strings, each prefixed by its size. 1820 */ 1821 int consumed = sizeof(*augmented_arg) + augmented_arg->size; 1822 1823 arg->augmented.args = ((void *)arg->augmented.args) + consumed; 1824 arg->augmented.size -= consumed; 1825 1826 return printed; 1827 } 1828 1829 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size, 1830 struct syscall_arg *arg) 1831 { 1832 unsigned long ptr = arg->val; 1833 1834 if (arg->augmented.args) 1835 return syscall_arg__scnprintf_augmented_string(arg, bf, size); 1836 1837 if (!arg->trace->vfs_getname) 1838 return scnprintf(bf, size, "%#x", ptr); 1839 1840 thread__set_filename_pos(arg->thread, bf, ptr); 1841 return 0; 1842 } 1843 1844 #define MAX_CONTROL_CHAR 31 1845 #define MAX_ASCII 127 1846 1847 static size_t syscall_arg__scnprintf_buf(char *bf, size_t size, struct syscall_arg *arg) 1848 { 1849 struct augmented_arg *augmented_arg = arg->augmented.args; 1850 unsigned char *orig = (unsigned char *)augmented_arg->value; 1851 size_t printed = 0; 1852 int consumed; 1853 1854 if (augmented_arg == NULL) 1855 return 0; 1856 1857 for (int j = 0; j < augmented_arg->size; ++j) { 1858 bool control_char = orig[j] <= MAX_CONTROL_CHAR || orig[j] >= MAX_ASCII; 1859 /* print control characters (0~31 and 127), and non-ascii characters in \(digits) */ 1860 printed += scnprintf(bf + printed, size - printed, control_char ? "\\%d" : "%c", (int)orig[j]); 1861 } 1862 1863 consumed = sizeof(*augmented_arg) + augmented_arg->size; 1864 arg->augmented.args = ((void *)arg->augmented.args) + consumed; 1865 arg->augmented.size -= consumed; 1866 1867 return printed; 1868 } 1869 1870 static bool trace__filter_duration(struct trace *trace, double t) 1871 { 1872 return t < (trace->duration_filter * NSEC_PER_MSEC); 1873 } 1874 1875 static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) 1876 { 1877 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC; 1878 1879 return fprintf(fp, "%10.3f ", ts); 1880 } 1881 1882 /* 1883 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are 1884 * using ttrace->entry_time for a thread that receives a sys_exit without 1885 * first having received a sys_enter ("poll" issued before tracing session 1886 * starts, lost sys_enter exit due to ring buffer overflow). 1887 */ 1888 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) 1889 { 1890 if (tstamp > 0) 1891 return __trace__fprintf_tstamp(trace, tstamp, fp); 1892 1893 return fprintf(fp, " ? 
"); 1894 } 1895 1896 static pid_t workload_pid = -1; 1897 static volatile sig_atomic_t done = false; 1898 static volatile sig_atomic_t interrupted = false; 1899 1900 static void sighandler_interrupt(int sig __maybe_unused) 1901 { 1902 done = interrupted = true; 1903 } 1904 1905 static void sighandler_chld(int sig __maybe_unused, siginfo_t *info, 1906 void *context __maybe_unused) 1907 { 1908 if (info->si_pid == workload_pid) 1909 done = true; 1910 } 1911 1912 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp) 1913 { 1914 size_t printed = 0; 1915 1916 if (trace->multiple_threads) { 1917 if (trace->show_comm) 1918 printed += fprintf(fp, "%.14s/", thread__comm_str(thread)); 1919 printed += fprintf(fp, "%d ", thread__tid(thread)); 1920 } 1921 1922 return printed; 1923 } 1924 1925 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread, 1926 u64 duration, bool duration_calculated, u64 tstamp, FILE *fp) 1927 { 1928 size_t printed = 0; 1929 1930 if (trace->show_tstamp) 1931 printed = trace__fprintf_tstamp(trace, tstamp, fp); 1932 if (trace->show_duration) 1933 printed += fprintf_duration(duration, duration_calculated, fp); 1934 return printed + trace__fprintf_comm_tid(trace, thread, fp); 1935 } 1936 1937 static int trace__process_event(struct trace *trace, struct machine *machine, 1938 union perf_event *event, struct perf_sample *sample) 1939 { 1940 int ret = 0; 1941 1942 switch (event->header.type) { 1943 case PERF_RECORD_LOST: 1944 color_fprintf(trace->output, PERF_COLOR_RED, 1945 "LOST %" PRIu64 " events!\n", (u64)event->lost.lost); 1946 ret = machine__process_lost_event(machine, event, sample); 1947 break; 1948 default: 1949 ret = machine__process_event(machine, event, sample); 1950 break; 1951 } 1952 1953 return ret; 1954 } 1955 1956 static int trace__tool_process(const struct perf_tool *tool, 1957 union perf_event *event, 1958 struct perf_sample *sample, 1959 struct machine *machine) 1960 { 1961 struct trace *trace = container_of(tool, struct trace, tool); 1962 return trace__process_event(trace, machine, event, sample); 1963 } 1964 1965 static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp) 1966 { 1967 struct machine *machine = vmachine; 1968 1969 if (machine->kptr_restrict_warned) 1970 return NULL; 1971 1972 if (symbol_conf.kptr_restrict) { 1973 pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n" 1974 "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n" 1975 "Kernel samples will not be resolved.\n"); 1976 machine->kptr_restrict_warned = true; 1977 return NULL; 1978 } 1979 1980 return machine__resolve_kernel_addr(vmachine, addrp, modp); 1981 } 1982 1983 static int trace__symbols_init(struct trace *trace, struct evlist *evlist) 1984 { 1985 int err = symbol__init(NULL); 1986 1987 if (err) 1988 return err; 1989 1990 trace->host = machine__new_host(); 1991 if (trace->host == NULL) 1992 return -ENOMEM; 1993 1994 thread__set_priv_destructor(thread_trace__delete); 1995 1996 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr); 1997 if (err < 0) 1998 goto out; 1999 2000 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, 2001 evlist->core.threads, trace__tool_process, 2002 true, false, 1); 2003 out: 2004 if (err) 2005 symbol__exit(); 2006 2007 return err; 2008 } 2009 2010 static void trace__symbols__exit(struct trace *trace) 2011 { 2012 
machine__exit(trace->host); 2013 trace->host = NULL; 2014 2015 symbol__exit(); 2016 } 2017 2018 static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args) 2019 { 2020 int idx; 2021 2022 if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0) 2023 nr_args = sc->fmt->nr_args; 2024 2025 sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt)); 2026 if (sc->arg_fmt == NULL) 2027 return -1; 2028 2029 for (idx = 0; idx < nr_args; ++idx) { 2030 if (sc->fmt) 2031 sc->arg_fmt[idx] = sc->fmt->arg[idx]; 2032 } 2033 2034 sc->nr_args = nr_args; 2035 return 0; 2036 } 2037 2038 static const struct syscall_arg_fmt syscall_arg_fmts__by_name[] = { 2039 { .name = "msr", .scnprintf = SCA_X86_MSR, .strtoul = STUL_X86_MSR, }, 2040 { .name = "vector", .scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, }, 2041 }; 2042 2043 static int syscall_arg_fmt__cmp(const void *name, const void *fmtp) 2044 { 2045 const struct syscall_arg_fmt *fmt = fmtp; 2046 return strcmp(name, fmt->name); 2047 } 2048 2049 static const struct syscall_arg_fmt * 2050 __syscall_arg_fmt__find_by_name(const struct syscall_arg_fmt *fmts, const int nmemb, 2051 const char *name) 2052 { 2053 return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp); 2054 } 2055 2056 static const struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name) 2057 { 2058 const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name); 2059 return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name); 2060 } 2061 2062 static struct tep_format_field * 2063 syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field, 2064 bool *use_btf) 2065 { 2066 struct tep_format_field *last_field = NULL; 2067 int len; 2068 2069 for (; field; field = field->next, ++arg) { 2070 last_field = field; 2071 2072 if (arg->scnprintf) 2073 continue; 2074 2075 len = strlen(field->name); 2076 2077 // As far as heuristics (or intention) goes this seems to hold true, and makes sense! 
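		/*
		 * A hedged illustration of where the heuristics below typically
		 * land, using common syscall fields (examples only, not read
		 * from any specific format file):
		 *
		 *	const char *filename      -> from_user + SCA_FILENAME
		 *	char *buf (as in read())  -> pointer, kernel-filled, so just SCA_PTR
		 *	unsigned int fd           -> SCA_FD ("3</etc/passwd>" style output)
		 *	pid_t pid                 -> SCA_PID
		 *	umode_t mode              -> SCA_MODE_T
		 *	enum landlock_rule_type   -> BTF based pretty printing (STUL_BTF_TYPE)
		 */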
2078 if ((field->flags & TEP_FIELD_IS_POINTER) && strstarts(field->type, "const ")) 2079 arg->from_user = true; 2080 2081 if (strcmp(field->type, "const char *") == 0 && 2082 ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) || 2083 strstr(field->name, "path") != NULL)) { 2084 arg->scnprintf = SCA_FILENAME; 2085 } else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr")) 2086 arg->scnprintf = SCA_PTR; 2087 else if (strcmp(field->type, "pid_t") == 0) 2088 arg->scnprintf = SCA_PID; 2089 else if (strcmp(field->type, "umode_t") == 0) 2090 arg->scnprintf = SCA_MODE_T; 2091 else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) { 2092 arg->scnprintf = SCA_CHAR_ARRAY; 2093 arg->nr_entries = field->arraylen; 2094 } else if ((strcmp(field->type, "int") == 0 || 2095 strcmp(field->type, "unsigned int") == 0 || 2096 strcmp(field->type, "long") == 0) && 2097 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) { 2098 /* 2099 * /sys/kernel/tracing/events/syscalls/sys_enter* 2100 * grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c 2101 * 65 int 2102 * 23 unsigned int 2103 * 7 unsigned long 2104 */ 2105 arg->scnprintf = SCA_FD; 2106 } else if (strstr(field->type, "enum") && use_btf != NULL) { 2107 *use_btf = true; 2108 arg->strtoul = STUL_BTF_TYPE; 2109 } else { 2110 const struct syscall_arg_fmt *fmt = 2111 syscall_arg_fmt__find_by_name(field->name); 2112 2113 if (fmt) { 2114 arg->scnprintf = fmt->scnprintf; 2115 arg->strtoul = fmt->strtoul; 2116 } 2117 } 2118 } 2119 2120 return last_field; 2121 } 2122 2123 static int syscall__set_arg_fmts(struct syscall *sc) 2124 { 2125 struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args, 2126 &sc->use_btf); 2127 2128 if (last_field) 2129 sc->args_size = last_field->offset + last_field->size; 2130 2131 return 0; 2132 } 2133 2134 static int syscall__read_info(struct syscall *sc, struct trace *trace) 2135 { 2136 char tp_name[128]; 2137 const char *name; 2138 int err; 2139 2140 if (sc->nonexistent) 2141 return -EEXIST; 2142 2143 if (sc->name) { 2144 /* Info already read. */ 2145 return 0; 2146 } 2147 2148 name = syscalltbl__name(sc->e_machine, sc->id); 2149 if (name == NULL) { 2150 sc->nonexistent = true; 2151 return -EEXIST; 2152 } 2153 2154 sc->name = name; 2155 sc->fmt = syscall_fmt__find(sc->name); 2156 2157 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name); 2158 sc->tp_format = trace_event__tp_format("syscalls", tp_name); 2159 2160 if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) { 2161 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias); 2162 sc->tp_format = trace_event__tp_format("syscalls", tp_name); 2163 } 2164 2165 /* 2166 * Fails to read trace point format via sysfs node, so the trace point 2167 * doesn't exist. Set the 'nonexistent' flag as true. 2168 */ 2169 if (IS_ERR(sc->tp_format)) { 2170 sc->nonexistent = true; 2171 err = PTR_ERR(sc->tp_format); 2172 sc->tp_format = NULL; 2173 return err; 2174 } 2175 2176 /* 2177 * The tracepoint format contains __syscall_nr field, so it's one more 2178 * than the actual number of syscall arguments. 2179 */ 2180 if (syscall__alloc_arg_fmts(sc, sc->tp_format->format.nr_fields - 1)) 2181 return -ENOMEM; 2182 2183 sc->args = sc->tp_format->format.fields; 2184 /* 2185 * We need to check and discard the first variable '__syscall_nr' 2186 * or 'nr' that mean the syscall number. It is needless here. 
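	 *
	 * For instance, a sys_enter_openat format file looks roughly like
	 * (abbreviated, offsets/sizes omitted):
	 *
	 *	field:int __syscall_nr;
	 *	field:int dfd;
	 *	field:const char * filename;
	 *	field:int flags;
	 *	field:umode_t mode;
	 *
	 * where only the last four are real syscall arguments.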
2187 * So drop '__syscall_nr' or 'nr' field but does not exist on older kernels. 2188 */ 2189 if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) { 2190 sc->args = sc->args->next; 2191 --sc->nr_args; 2192 } 2193 2194 sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit"); 2195 sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat"); 2196 2197 err = syscall__set_arg_fmts(sc); 2198 2199 /* after calling syscall__set_arg_fmts() we'll know whether use_btf is true */ 2200 if (sc->use_btf) 2201 trace__load_vmlinux_btf(trace); 2202 2203 return err; 2204 } 2205 2206 static int evsel__init_tp_arg_scnprintf(struct evsel *evsel, bool *use_btf) 2207 { 2208 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel); 2209 2210 if (fmt != NULL) { 2211 const struct tep_event *tp_format = evsel__tp_format(evsel); 2212 2213 if (tp_format) { 2214 syscall_arg_fmt__init_array(fmt, tp_format->format.fields, use_btf); 2215 return 0; 2216 } 2217 } 2218 2219 return -ENOMEM; 2220 } 2221 2222 static int intcmp(const void *a, const void *b) 2223 { 2224 const int *one = a, *another = b; 2225 2226 return *one - *another; 2227 } 2228 2229 static int trace__validate_ev_qualifier(struct trace *trace) 2230 { 2231 int err = 0; 2232 bool printed_invalid_prefix = false; 2233 struct str_node *pos; 2234 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier); 2235 2236 trace->ev_qualifier_ids.entries = malloc(nr_allocated * 2237 sizeof(trace->ev_qualifier_ids.entries[0])); 2238 2239 if (trace->ev_qualifier_ids.entries == NULL) { 2240 fputs("Error:\tNot enough memory for allocating events qualifier ids\n", 2241 trace->output); 2242 err = -EINVAL; 2243 goto out; 2244 } 2245 2246 strlist__for_each_entry(pos, trace->ev_qualifier) { 2247 const char *sc = pos->s; 2248 /* 2249 * TODO: Assume more than the validation/warnings are all for 2250 * the same binary type as perf. 
2251 */ 2252 int id = syscalltbl__id(EM_HOST, sc), match_next = -1; 2253 2254 if (id < 0) { 2255 id = syscalltbl__strglobmatch_first(EM_HOST, sc, &match_next); 2256 if (id >= 0) 2257 goto matches; 2258 2259 if (!printed_invalid_prefix) { 2260 pr_debug("Skipping unknown syscalls: "); 2261 printed_invalid_prefix = true; 2262 } else { 2263 pr_debug(", "); 2264 } 2265 2266 pr_debug("%s", sc); 2267 continue; 2268 } 2269 matches: 2270 trace->ev_qualifier_ids.entries[nr_used++] = id; 2271 if (match_next == -1) 2272 continue; 2273 2274 while (1) { 2275 id = syscalltbl__strglobmatch_next(EM_HOST, sc, &match_next); 2276 if (id < 0) 2277 break; 2278 if (nr_allocated == nr_used) { 2279 void *entries; 2280 2281 nr_allocated += 8; 2282 entries = realloc(trace->ev_qualifier_ids.entries, 2283 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0])); 2284 if (entries == NULL) { 2285 err = -ENOMEM; 2286 fputs("\nError:\t Not enough memory for parsing\n", trace->output); 2287 goto out_free; 2288 } 2289 trace->ev_qualifier_ids.entries = entries; 2290 } 2291 trace->ev_qualifier_ids.entries[nr_used++] = id; 2292 } 2293 } 2294 2295 trace->ev_qualifier_ids.nr = nr_used; 2296 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp); 2297 out: 2298 if (printed_invalid_prefix) 2299 pr_debug("\n"); 2300 return err; 2301 out_free: 2302 zfree(&trace->ev_qualifier_ids.entries); 2303 trace->ev_qualifier_ids.nr = 0; 2304 goto out; 2305 } 2306 2307 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id) 2308 { 2309 bool in_ev_qualifier; 2310 2311 if (trace->ev_qualifier_ids.nr == 0) 2312 return true; 2313 2314 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries, 2315 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL; 2316 2317 if (in_ev_qualifier) 2318 return !trace->not_ev_qualifier; 2319 2320 return trace->not_ev_qualifier; 2321 } 2322 2323 /* 2324 * args is to be interpreted as a series of longs but we need to handle 2325 * 8-byte unaligned accesses. args points to raw_data within the event 2326 * and raw_data is guaranteed to be 8-byte unaligned because it is 2327 * preceded by raw_size which is a u32. So we need to copy args to a temp 2328 * variable to read it. Most notably this avoids extended load instructions 2329 * on unaligned addresses 2330 */ 2331 unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx) 2332 { 2333 unsigned long val; 2334 unsigned char *p = arg->args + sizeof(unsigned long) * idx; 2335 2336 memcpy(&val, p, sizeof(val)); 2337 return val; 2338 } 2339 2340 static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size, 2341 struct syscall_arg *arg) 2342 { 2343 if (sc->arg_fmt && sc->arg_fmt[arg->idx].name) 2344 return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name); 2345 2346 return scnprintf(bf, size, "arg%d: ", arg->idx); 2347 } 2348 2349 /* 2350 * Check if the value is in fact zero, i.e. 
mask whatever needs masking, such 2351 * as mount 'flags' argument that needs ignoring some magic flag, see comment 2352 * in tools/perf/trace/beauty/mount_flags.c 2353 */ 2354 static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val) 2355 { 2356 if (fmt && fmt->mask_val) 2357 return fmt->mask_val(arg, val); 2358 2359 return val; 2360 } 2361 2362 static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size, 2363 struct syscall_arg *arg, unsigned long val) 2364 { 2365 if (fmt && fmt->scnprintf) { 2366 arg->val = val; 2367 if (fmt->parm) 2368 arg->parm = fmt->parm; 2369 return fmt->scnprintf(bf, size, arg); 2370 } 2371 return scnprintf(bf, size, "%ld", val); 2372 } 2373 2374 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size, 2375 unsigned char *args, void *augmented_args, int augmented_args_size, 2376 struct trace *trace, struct thread *thread) 2377 { 2378 size_t printed = 0, btf_printed; 2379 unsigned long val; 2380 u8 bit = 1; 2381 struct syscall_arg arg = { 2382 .args = args, 2383 .augmented = { 2384 .size = augmented_args_size, 2385 .args = augmented_args, 2386 }, 2387 .idx = 0, 2388 .mask = 0, 2389 .trace = trace, 2390 .thread = thread, 2391 .show_string_prefix = trace->show_string_prefix, 2392 }; 2393 struct thread_trace *ttrace = thread__priv(thread); 2394 void *default_scnprintf; 2395 2396 /* 2397 * Things like fcntl will set this in its 'cmd' formatter to pick the 2398 * right formatter for the return value (an fd? file flags?), which is 2399 * not needed for syscalls that always return a given type, say an fd. 2400 */ 2401 ttrace->ret_scnprintf = NULL; 2402 2403 if (sc->args != NULL) { 2404 struct tep_format_field *field; 2405 2406 for (field = sc->args; field; 2407 field = field->next, ++arg.idx, bit <<= 1) { 2408 if (arg.mask & bit) 2409 continue; 2410 2411 arg.fmt = &sc->arg_fmt[arg.idx]; 2412 val = syscall_arg__val(&arg, arg.idx); 2413 /* 2414 * Some syscall args need some mask, most don't and 2415 * return val untouched. 2416 */ 2417 val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val); 2418 2419 /* 2420 * Suppress this argument if its value is zero and show_zero 2421 * property isn't set. 2422 * 2423 * If it has a BTF type, then override the zero suppression knob 2424 * as the common case is for zero in an enum to have an associated entry. 2425 */ 2426 if (val == 0 && !trace->show_zeros && 2427 !(sc->arg_fmt && sc->arg_fmt[arg.idx].show_zero) && 2428 !(sc->arg_fmt && sc->arg_fmt[arg.idx].strtoul == STUL_BTF_TYPE)) 2429 continue; 2430 2431 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); 2432 2433 if (trace->show_arg_names) 2434 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); 2435 2436 default_scnprintf = sc->arg_fmt[arg.idx].scnprintf; 2437 2438 if (trace->force_btf || default_scnprintf == NULL || default_scnprintf == SCA_PTR) { 2439 btf_printed = trace__btf_scnprintf(trace, &arg, bf + printed, 2440 size - printed, val, field->type); 2441 if (btf_printed) { 2442 printed += btf_printed; 2443 continue; 2444 } 2445 } 2446 2447 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], 2448 bf + printed, size - printed, &arg, val); 2449 } 2450 } else if (IS_ERR(sc->tp_format)) { 2451 /* 2452 * If we managed to read the tracepoint /format file, then we 2453 * may end up not having any args, like with gettid(), so only 2454 * print the raw args when we didn't manage to read it. 
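	 *
	 * The fallback below then prints raw longs, prefixed by the names from
	 * sc->fmt when we have them or by "argN" otherwise, e.g. (values made
	 * up) "arg0: 3, arg1: 139637976727552, arg2: 832" for a syscall whose
	 * format file couldn't be read.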
2455 */ 2456 while (arg.idx < sc->nr_args) { 2457 if (arg.mask & bit) 2458 goto next_arg; 2459 val = syscall_arg__val(&arg, arg.idx); 2460 if (printed) 2461 printed += scnprintf(bf + printed, size - printed, ", "); 2462 printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg); 2463 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val); 2464 next_arg: 2465 ++arg.idx; 2466 bit <<= 1; 2467 } 2468 } 2469 2470 return printed; 2471 } 2472 2473 static struct syscall *syscall__new(int e_machine, int id) 2474 { 2475 struct syscall *sc = zalloc(sizeof(*sc)); 2476 2477 if (!sc) 2478 return NULL; 2479 2480 sc->e_machine = e_machine; 2481 sc->id = id; 2482 return sc; 2483 } 2484 2485 static void syscall__delete(struct syscall *sc) 2486 { 2487 if (!sc) 2488 return; 2489 2490 free(sc->arg_fmt); 2491 free(sc); 2492 } 2493 2494 static int syscall__bsearch_cmp(const void *key, const void *entry) 2495 { 2496 const struct syscall *a = key, *b = *((const struct syscall **)entry); 2497 2498 if (a->e_machine != b->e_machine) 2499 return a->e_machine - b->e_machine; 2500 2501 return a->id - b->id; 2502 } 2503 2504 static int syscall__cmp(const void *va, const void *vb) 2505 { 2506 const struct syscall *a = *((const struct syscall **)va); 2507 const struct syscall *b = *((const struct syscall **)vb); 2508 2509 if (a->e_machine != b->e_machine) 2510 return a->e_machine - b->e_machine; 2511 2512 return a->id - b->id; 2513 } 2514 2515 static struct syscall *trace__find_syscall(struct trace *trace, int e_machine, int id) 2516 { 2517 struct syscall key = { 2518 .e_machine = e_machine, 2519 .id = id, 2520 }; 2521 struct syscall *sc, **tmp; 2522 2523 if (trace->syscalls.table) { 2524 struct syscall **sc_entry = bsearch(&key, trace->syscalls.table, 2525 trace->syscalls.table_size, 2526 sizeof(trace->syscalls.table[0]), 2527 syscall__bsearch_cmp); 2528 2529 if (sc_entry) 2530 return *sc_entry; 2531 } 2532 2533 sc = syscall__new(e_machine, id); 2534 if (!sc) 2535 return NULL; 2536 2537 tmp = reallocarray(trace->syscalls.table, trace->syscalls.table_size + 1, 2538 sizeof(trace->syscalls.table[0])); 2539 if (!tmp) { 2540 syscall__delete(sc); 2541 return NULL; 2542 } 2543 2544 trace->syscalls.table = tmp; 2545 trace->syscalls.table[trace->syscalls.table_size++] = sc; 2546 qsort(trace->syscalls.table, trace->syscalls.table_size, sizeof(trace->syscalls.table[0]), 2547 syscall__cmp); 2548 return sc; 2549 } 2550 2551 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel, 2552 union perf_event *event, 2553 struct perf_sample *sample); 2554 2555 static struct syscall *trace__syscall_info(struct trace *trace, struct evsel *evsel, 2556 int e_machine, int id) 2557 { 2558 struct syscall *sc; 2559 int err = 0; 2560 2561 if (id < 0) { 2562 2563 /* 2564 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried 2565 * before that, leaving at a higher verbosity level till that is 2566 * explained. Reproduced with plain ftrace with: 2567 * 2568 * echo 1 > /t/events/raw_syscalls/sys_exit/enable 2569 * grep "NR -1 " /t/trace_pipe 2570 * 2571 * After generating some load on the machine. 
2572 */ 2573 if (verbose > 1) { 2574 static u64 n; 2575 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n", 2576 id, evsel__name(evsel), ++n); 2577 } 2578 return NULL; 2579 } 2580 2581 err = -EINVAL; 2582 2583 sc = trace__find_syscall(trace, e_machine, id); 2584 if (sc) 2585 err = syscall__read_info(sc, trace); 2586 2587 if (err && verbose > 0) { 2588 char sbuf[STRERR_BUFSIZE]; 2589 2590 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, 2591 str_error_r(-err, sbuf, sizeof(sbuf))); 2592 if (sc && sc->name) 2593 fprintf(trace->output, "(%s)", sc->name); 2594 fputs(" information\n", trace->output); 2595 } 2596 return err ? NULL : sc; 2597 } 2598 2599 struct syscall_stats { 2600 struct stats stats; 2601 u64 nr_failures; 2602 int max_errno; 2603 u32 *errnos; 2604 }; 2605 2606 static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace, 2607 int id, struct perf_sample *sample, long err, 2608 struct trace *trace) 2609 { 2610 struct hashmap *syscall_stats = ttrace->syscall_stats; 2611 struct syscall_stats *stats = NULL; 2612 u64 duration = 0; 2613 2614 if (trace->summary_bpf) 2615 return; 2616 2617 if (trace->summary_mode == SUMMARY__BY_TOTAL) 2618 syscall_stats = trace->syscall_stats; 2619 2620 if (!hashmap__find(syscall_stats, id, &stats)) { 2621 stats = zalloc(sizeof(*stats)); 2622 if (stats == NULL) 2623 return; 2624 2625 init_stats(&stats->stats); 2626 if (hashmap__add(syscall_stats, id, stats) < 0) { 2627 free(stats); 2628 return; 2629 } 2630 } 2631 2632 if (ttrace->entry_time && sample->time > ttrace->entry_time) 2633 duration = sample->time - ttrace->entry_time; 2634 2635 update_stats(&stats->stats, duration); 2636 2637 if (err < 0) { 2638 ++stats->nr_failures; 2639 2640 if (!trace->errno_summary) 2641 return; 2642 2643 err = -err; 2644 if (err > stats->max_errno) { 2645 u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32)); 2646 2647 if (new_errnos) { 2648 memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32)); 2649 } else { 2650 pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n", 2651 thread__comm_str(thread), thread__pid(thread), 2652 thread__tid(thread)); 2653 return; 2654 } 2655 2656 stats->errnos = new_errnos; 2657 stats->max_errno = err; 2658 } 2659 2660 ++stats->errnos[err - 1]; 2661 } 2662 } 2663 2664 static int trace__printf_interrupted_entry(struct trace *trace) 2665 { 2666 struct thread_trace *ttrace; 2667 size_t printed; 2668 int len; 2669 2670 if (trace->failure_only || trace->current == NULL) 2671 return 0; 2672 2673 ttrace = thread__priv(trace->current); 2674 2675 if (!ttrace->entry_pending) 2676 return 0; 2677 2678 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output); 2679 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str); 2680 2681 if (len < trace->args_alignment - 4) 2682 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " "); 2683 2684 printed += fprintf(trace->output, " ...\n"); 2685 2686 ttrace->entry_pending = false; 2687 ++trace->nr_events_printed; 2688 2689 return printed; 2690 } 2691 2692 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel, 2693 struct perf_sample *sample, struct thread *thread) 2694 { 2695 int printed = 0; 2696 2697 if (trace->print_sample) { 2698 double ts = (double)sample->time / NSEC_PER_MSEC; 2699 2700 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n", 2701 
evsel__name(evsel), ts, 2702 thread__comm_str(thread), 2703 sample->pid, sample->tid, sample->cpu); 2704 } 2705 2706 return printed; 2707 } 2708 2709 static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size) 2710 { 2711 /* 2712 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter 2713 * and there we get all 6 syscall args plus the tracepoint common fields 2714 * that gets calculated at the start and the syscall_nr (another long). 2715 * So we check if that is the case and if so don't look after the 2716 * sc->args_size but always after the full raw_syscalls:sys_enter payload, 2717 * which is fixed. 2718 * 2719 * We'll revisit this later to pass s->args_size to the BPF augmenter 2720 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c, so that it 2721 * copies only what we need for each syscall, like what happens when we 2722 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace 2723 * traffic to just what is needed for each syscall. 2724 */ 2725 int args_size = raw_augmented_args_size ?: sc->args_size; 2726 2727 *augmented_args_size = sample->raw_size - args_size; 2728 if (*augmented_args_size > 0) { 2729 static uintptr_t argbuf[1024]; /* assuming single-threaded */ 2730 2731 if ((size_t)(*augmented_args_size) > sizeof(argbuf)) 2732 return NULL; 2733 2734 /* 2735 * The perf ring-buffer is 8-byte aligned but sample->raw_data 2736 * is not because it's preceded by u32 size. Later, beautifier 2737 * will use the augmented args with stricter alignments like in 2738 * some struct. To make sure it's aligned, let's copy the args 2739 * into a static buffer as it's single-threaded for now. 2740 */ 2741 memcpy(argbuf, sample->raw_data + args_size, *augmented_args_size); 2742 2743 return argbuf; 2744 } 2745 return NULL; 2746 } 2747 2748 static int trace__sys_enter(struct trace *trace, struct evsel *evsel, 2749 union perf_event *event __maybe_unused, 2750 struct perf_sample *sample) 2751 { 2752 char *msg; 2753 void *args; 2754 int printed = 0; 2755 struct thread *thread; 2756 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1; 2757 int augmented_args_size = 0, e_machine; 2758 void *augmented_args = NULL; 2759 struct syscall *sc; 2760 struct thread_trace *ttrace; 2761 2762 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2763 e_machine = thread__e_machine(thread, trace->host); 2764 sc = trace__syscall_info(trace, evsel, e_machine, id); 2765 if (sc == NULL) 2766 goto out_put; 2767 ttrace = thread__trace(thread, trace); 2768 if (ttrace == NULL) 2769 goto out_put; 2770 2771 trace__fprintf_sample(trace, evsel, sample, thread); 2772 2773 args = perf_evsel__sc_tp_ptr(evsel, args, sample); 2774 2775 if (ttrace->entry_str == NULL) { 2776 ttrace->entry_str = malloc(trace__entry_str_size); 2777 if (!ttrace->entry_str) 2778 goto out_put; 2779 } 2780 2781 if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) 2782 trace__printf_interrupted_entry(trace); 2783 /* 2784 * If this is raw_syscalls.sys_enter, then it always comes with the 6 possible 2785 * arguments, even if the syscall being handled, say "openat", uses only 4 arguments 2786 * this breaks syscall__augmented_args() check for augmented args, as we calculate 2787 * syscall->args_size using each syscalls:sys_enter_NAME tracefs format file, 2788 * so when handling, say the openat syscall, we end up getting 6 args for the 2789 * raw_syscalls:sys_enter event, when we expected just 4, we end 
up mistakenly 2790 * thinking that the extra 2 u64 args are the augmented filename, so just check 2791 * here and avoid using augmented syscalls when the evsel is the raw_syscalls one. 2792 */ 2793 if (evsel != trace->syscalls.events.sys_enter) 2794 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size); 2795 ttrace->entry_time = sample->time; 2796 msg = ttrace->entry_str; 2797 printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name); 2798 2799 printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed, 2800 args, augmented_args, augmented_args_size, trace, thread); 2801 2802 if (sc->is_exit) { 2803 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) { 2804 int alignment = 0; 2805 2806 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output); 2807 printed = fprintf(trace->output, "%s)", ttrace->entry_str); 2808 if (trace->args_alignment > printed) 2809 alignment = trace->args_alignment - printed; 2810 fprintf(trace->output, "%*s= ?\n", alignment, " "); 2811 } 2812 } else { 2813 ttrace->entry_pending = true; 2814 /* See trace__vfs_getname & trace__sys_exit */ 2815 ttrace->filename.pending_open = false; 2816 } 2817 2818 if (trace->current != thread) { 2819 thread__put(trace->current); 2820 trace->current = thread__get(thread); 2821 } 2822 err = 0; 2823 out_put: 2824 thread__put(thread); 2825 return err; 2826 } 2827 2828 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel, 2829 struct perf_sample *sample) 2830 { 2831 struct thread_trace *ttrace; 2832 struct thread *thread; 2833 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1; 2834 struct syscall *sc; 2835 char msg[1024]; 2836 void *args, *augmented_args = NULL; 2837 int augmented_args_size, e_machine; 2838 size_t printed = 0; 2839 2840 2841 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2842 e_machine = thread__e_machine(thread, trace->host); 2843 sc = trace__syscall_info(trace, evsel, e_machine, id); 2844 if (sc == NULL) 2845 goto out_put; 2846 ttrace = thread__trace(thread, trace); 2847 /* 2848 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args() 2849 * and the rest of the beautifiers accessing it via struct syscall_arg touches it. 2850 */ 2851 if (ttrace == NULL) 2852 goto out_put; 2853 2854 args = perf_evsel__sc_tp_ptr(evsel, args, sample); 2855 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size); 2856 printed += syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread); 2857 fprintf(trace->output, "%.*s", (int)printed, msg); 2858 err = 0; 2859 out_put: 2860 thread__put(thread); 2861 return err; 2862 } 2863 2864 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel, 2865 struct perf_sample *sample, 2866 struct callchain_cursor *cursor) 2867 { 2868 struct addr_location al; 2869 int max_stack = evsel->core.attr.sample_max_stack ? 
2870 evsel->core.attr.sample_max_stack : 2871 trace->max_stack; 2872 int err = -1; 2873 2874 addr_location__init(&al); 2875 if (machine__resolve(trace->host, &al, sample) < 0) 2876 goto out; 2877 2878 err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack); 2879 out: 2880 addr_location__exit(&al); 2881 return err; 2882 } 2883 2884 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample) 2885 { 2886 /* TODO: user-configurable print_opts */ 2887 const unsigned int print_opts = EVSEL__PRINT_SYM | 2888 EVSEL__PRINT_DSO | 2889 EVSEL__PRINT_UNKNOWN_AS_ADDR; 2890 2891 return sample__fprintf_callchain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output); 2892 } 2893 2894 static const char *errno_to_name(struct evsel *evsel, int err) 2895 { 2896 struct perf_env *env = evsel__env(evsel); 2897 2898 return perf_env__arch_strerrno(env, err); 2899 } 2900 2901 static int trace__sys_exit(struct trace *trace, struct evsel *evsel, 2902 union perf_event *event __maybe_unused, 2903 struct perf_sample *sample) 2904 { 2905 long ret; 2906 u64 duration = 0; 2907 bool duration_calculated = false; 2908 struct thread *thread; 2909 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0; 2910 int alignment = trace->args_alignment, e_machine; 2911 struct syscall *sc; 2912 struct thread_trace *ttrace; 2913 2914 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2915 e_machine = thread__e_machine(thread, trace->host); 2916 sc = trace__syscall_info(trace, evsel, e_machine, id); 2917 if (sc == NULL) 2918 goto out_put; 2919 ttrace = thread__trace(thread, trace); 2920 if (ttrace == NULL) 2921 goto out_put; 2922 2923 trace__fprintf_sample(trace, evsel, sample, thread); 2924 2925 ret = perf_evsel__sc_tp_uint(evsel, ret, sample); 2926 2927 if (trace->summary) 2928 thread__update_stats(thread, ttrace, id, sample, ret, trace); 2929 2930 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) { 2931 trace__set_fd_pathname(thread, ret, ttrace->filename.name); 2932 ttrace->filename.pending_open = false; 2933 ++trace->stats.vfs_getname; 2934 } 2935 2936 if (ttrace->entry_time) { 2937 duration = sample->time - ttrace->entry_time; 2938 if (trace__filter_duration(trace, duration)) 2939 goto out; 2940 duration_calculated = true; 2941 } else if (trace->duration_filter) 2942 goto out; 2943 2944 if (sample->callchain) { 2945 struct callchain_cursor *cursor = get_tls_callchain_cursor(); 2946 2947 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); 2948 if (callchain_ret == 0) { 2949 if (cursor->nr < trace->min_stack) 2950 goto out; 2951 callchain_ret = 1; 2952 } 2953 } 2954 2955 if (trace->summary_only || (ret >= 0 && trace->failure_only)) 2956 goto out; 2957 2958 trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output); 2959 2960 if (ttrace->entry_pending) { 2961 printed = fprintf(trace->output, "%s", ttrace->entry_str); 2962 } else { 2963 printed += fprintf(trace->output, " ... 
["); 2964 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued"); 2965 printed += 9; 2966 printed += fprintf(trace->output, "]: %s()", sc->name); 2967 } 2968 2969 printed++; /* the closing ')' */ 2970 2971 if (alignment > printed) 2972 alignment -= printed; 2973 else 2974 alignment = 0; 2975 2976 fprintf(trace->output, ")%*s= ", alignment, " "); 2977 2978 if (sc->fmt == NULL) { 2979 if (ret < 0) 2980 goto errno_print; 2981 signed_print: 2982 fprintf(trace->output, "%ld", ret); 2983 } else if (ret < 0) { 2984 errno_print: { 2985 char bf[STRERR_BUFSIZE]; 2986 const char *emsg = str_error_r(-ret, bf, sizeof(bf)), 2987 *e = errno_to_name(evsel, -ret); 2988 2989 fprintf(trace->output, "-1 %s (%s)", e, emsg); 2990 } 2991 } else if (ret == 0 && sc->fmt->timeout) 2992 fprintf(trace->output, "0 (Timeout)"); 2993 else if (ttrace->ret_scnprintf) { 2994 char bf[1024]; 2995 struct syscall_arg arg = { 2996 .val = ret, 2997 .thread = thread, 2998 .trace = trace, 2999 }; 3000 ttrace->ret_scnprintf(bf, sizeof(bf), &arg); 3001 ttrace->ret_scnprintf = NULL; 3002 fprintf(trace->output, "%s", bf); 3003 } else if (sc->fmt->hexret) 3004 fprintf(trace->output, "%#lx", ret); 3005 else if (sc->fmt->errpid) { 3006 struct thread *child = machine__find_thread(trace->host, ret, ret); 3007 3008 fprintf(trace->output, "%ld", ret); 3009 if (child != NULL) { 3010 if (thread__comm_set(child)) 3011 fprintf(trace->output, " (%s)", thread__comm_str(child)); 3012 thread__put(child); 3013 } 3014 } else 3015 goto signed_print; 3016 3017 fputc('\n', trace->output); 3018 3019 /* 3020 * We only consider an 'event' for the sake of --max-events a non-filtered 3021 * sys_enter + sys_exit and other tracepoint events. 3022 */ 3023 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX) 3024 interrupted = true; 3025 3026 if (callchain_ret > 0) 3027 trace__fprintf_callchain(trace, sample); 3028 else if (callchain_ret < 0) 3029 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 3030 out: 3031 ttrace->entry_pending = false; 3032 err = 0; 3033 out_put: 3034 thread__put(thread); 3035 return err; 3036 } 3037 3038 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel, 3039 union perf_event *event __maybe_unused, 3040 struct perf_sample *sample) 3041 { 3042 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 3043 struct thread_trace *ttrace; 3044 size_t filename_len, entry_str_len, to_move; 3045 ssize_t remaining_space; 3046 char *pos; 3047 const char *filename = evsel__rawptr(evsel, sample, "pathname"); 3048 3049 if (!thread) 3050 goto out; 3051 3052 ttrace = thread__priv(thread); 3053 if (!ttrace) 3054 goto out_put; 3055 3056 filename_len = strlen(filename); 3057 if (filename_len == 0) 3058 goto out_put; 3059 3060 if (ttrace->filename.namelen < filename_len) { 3061 char *f = realloc(ttrace->filename.name, filename_len + 1); 3062 3063 if (f == NULL) 3064 goto out_put; 3065 3066 ttrace->filename.namelen = filename_len; 3067 ttrace->filename.name = f; 3068 } 3069 3070 strcpy(ttrace->filename.name, filename); 3071 ttrace->filename.pending_open = true; 3072 3073 if (!ttrace->filename.ptr) 3074 goto out_put; 3075 3076 entry_str_len = strlen(ttrace->entry_str); 3077 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */ 3078 if (remaining_space <= 0) 3079 goto out_put; 3080 3081 if (filename_len > (size_t)remaining_space) { 3082 filename += filename_len - remaining_space; 3083 filename_len = remaining_space; 3084 } 3085 
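	/*
	 * Worked example of the splice below, with illustrative values: at this
	 * point entry_str may be "openat(dfd: CWD, filename: , flags: RDONLY|CLOEXEC"
	 * and filename.entry_str_pos points just past "filename: ", where
	 * syscall_arg__scnprintf_filename() printed nothing and only recorded
	 * the position.  memmove() shifts the tail (", flags: RDONLY|CLOEXEC"
	 * plus the terminating NUL) filename_len bytes to the right and memcpy()
	 * drops the pathname from the probe:vfs_getname payload into the hole,
	 * giving "openat(dfd: CWD, filename: /etc/ld.so.cache, flags: RDONLY|CLOEXEC".
	 */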
3086 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */ 3087 pos = ttrace->entry_str + ttrace->filename.entry_str_pos; 3088 memmove(pos + filename_len, pos, to_move); 3089 memcpy(pos, filename, filename_len); 3090 3091 ttrace->filename.ptr = 0; 3092 ttrace->filename.entry_str_pos = 0; 3093 out_put: 3094 thread__put(thread); 3095 out: 3096 return 0; 3097 } 3098 3099 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel, 3100 union perf_event *event __maybe_unused, 3101 struct perf_sample *sample) 3102 { 3103 u64 runtime = evsel__intval(evsel, sample, "runtime"); 3104 double runtime_ms = (double)runtime / NSEC_PER_MSEC; 3105 struct thread *thread = machine__findnew_thread(trace->host, 3106 sample->pid, 3107 sample->tid); 3108 struct thread_trace *ttrace = thread__trace(thread, trace); 3109 3110 if (ttrace == NULL) 3111 goto out_dump; 3112 3113 ttrace->runtime_ms += runtime_ms; 3114 trace->runtime_ms += runtime_ms; 3115 out_put: 3116 thread__put(thread); 3117 return 0; 3118 3119 out_dump: 3120 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n", 3121 evsel->name, 3122 evsel__strval(evsel, sample, "comm"), 3123 (pid_t)evsel__intval(evsel, sample, "pid"), 3124 runtime, 3125 evsel__intval(evsel, sample, "vruntime")); 3126 goto out_put; 3127 } 3128 3129 static int bpf_output__printer(enum binary_printer_ops op, 3130 unsigned int val, void *extra __maybe_unused, FILE *fp) 3131 { 3132 unsigned char ch = (unsigned char)val; 3133 3134 switch (op) { 3135 case BINARY_PRINT_CHAR_DATA: 3136 return fprintf(fp, "%c", isprint(ch) ? ch : '.'); 3137 case BINARY_PRINT_DATA_BEGIN: 3138 case BINARY_PRINT_LINE_BEGIN: 3139 case BINARY_PRINT_ADDR: 3140 case BINARY_PRINT_NUM_DATA: 3141 case BINARY_PRINT_NUM_PAD: 3142 case BINARY_PRINT_SEP: 3143 case BINARY_PRINT_CHAR_PAD: 3144 case BINARY_PRINT_LINE_END: 3145 case BINARY_PRINT_DATA_END: 3146 default: 3147 break; 3148 } 3149 3150 return 0; 3151 } 3152 3153 static void bpf_output__fprintf(struct trace *trace, 3154 struct perf_sample *sample) 3155 { 3156 binary__fprintf(sample->raw_data, sample->raw_size, 8, 3157 bpf_output__printer, NULL, trace->output); 3158 ++trace->nr_events_printed; 3159 } 3160 3161 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample, 3162 struct thread *thread, void *augmented_args, int augmented_args_size) 3163 { 3164 char bf[2048]; 3165 size_t size = sizeof(bf); 3166 const struct tep_event *tp_format = evsel__tp_format(evsel); 3167 struct tep_format_field *field = tp_format ? 
tp_format->format.fields : NULL; 3168 struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel); 3169 size_t printed = 0, btf_printed; 3170 unsigned long val; 3171 u8 bit = 1; 3172 struct syscall_arg syscall_arg = { 3173 .augmented = { 3174 .size = augmented_args_size, 3175 .args = augmented_args, 3176 }, 3177 .idx = 0, 3178 .mask = 0, 3179 .trace = trace, 3180 .thread = thread, 3181 .show_string_prefix = trace->show_string_prefix, 3182 }; 3183 3184 for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) { 3185 if (syscall_arg.mask & bit) 3186 continue; 3187 3188 syscall_arg.len = 0; 3189 syscall_arg.fmt = arg; 3190 if (field->flags & TEP_FIELD_IS_ARRAY) { 3191 int offset = field->offset; 3192 3193 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 3194 offset = format_field__intval(field, sample, evsel->needs_swap); 3195 syscall_arg.len = offset >> 16; 3196 offset &= 0xffff; 3197 if (tep_field_is_relative(field->flags)) 3198 offset += field->offset + field->size; 3199 } 3200 3201 val = (uintptr_t)(sample->raw_data + offset); 3202 } else 3203 val = format_field__intval(field, sample, evsel->needs_swap); 3204 /* 3205 * Some syscall args need some mask, most don't and 3206 * return val untouched. 3207 */ 3208 val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val); 3209 3210 /* Suppress this argument if its value is zero and show_zero property isn't set. */ 3211 if (val == 0 && !trace->show_zeros && !arg->show_zero && arg->strtoul != STUL_BTF_TYPE) 3212 continue; 3213 3214 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); 3215 3216 if (trace->show_arg_names) 3217 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); 3218 3219 btf_printed = trace__btf_scnprintf(trace, &syscall_arg, bf + printed, size - printed, val, field->type); 3220 if (btf_printed) { 3221 printed += btf_printed; 3222 continue; 3223 } 3224 3225 printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val); 3226 } 3227 3228 return fprintf(trace->output, "%.*s", (int)printed, bf); 3229 } 3230 3231 static int trace__event_handler(struct trace *trace, struct evsel *evsel, 3232 union perf_event *event __maybe_unused, 3233 struct perf_sample *sample) 3234 { 3235 struct thread *thread; 3236 int callchain_ret = 0; 3237 3238 if (evsel->nr_events_printed >= evsel->max_events) 3239 return 0; 3240 3241 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 3242 3243 if (sample->callchain) { 3244 struct callchain_cursor *cursor = get_tls_callchain_cursor(); 3245 3246 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); 3247 if (callchain_ret == 0) { 3248 if (cursor->nr < trace->min_stack) 3249 goto out; 3250 callchain_ret = 1; 3251 } 3252 } 3253 3254 trace__printf_interrupted_entry(trace); 3255 trace__fprintf_tstamp(trace, sample->time, trace->output); 3256 3257 if (trace->trace_syscalls && trace->show_duration) 3258 fprintf(trace->output, "( ): "); 3259 3260 if (thread) 3261 trace__fprintf_comm_tid(trace, thread, trace->output); 3262 3263 if (evsel == trace->syscalls.events.bpf_output) { 3264 int id = perf_evsel__sc_tp_uint(evsel, id, sample); 3265 int e_machine = thread ? 
thread__e_machine(thread, trace->host) : EM_HOST; 3266 struct syscall *sc = trace__syscall_info(trace, evsel, e_machine, id); 3267 3268 if (sc) { 3269 fprintf(trace->output, "%s(", sc->name); 3270 trace__fprintf_sys_enter(trace, evsel, sample); 3271 fputc(')', trace->output); 3272 goto newline; 3273 } 3274 3275 /* 3276 * XXX: Not having the associated syscall info or not finding/adding 3277 * the thread should never happen, but if it does... 3278 * fall thru and print it as a bpf_output event. 3279 */ 3280 } 3281 3282 fprintf(trace->output, "%s(", evsel->name); 3283 3284 if (evsel__is_bpf_output(evsel)) { 3285 bpf_output__fprintf(trace, sample); 3286 } else { 3287 const struct tep_event *tp_format = evsel__tp_format(evsel); 3288 3289 if (tp_format && (strncmp(tp_format->name, "sys_enter_", 10) || 3290 trace__fprintf_sys_enter(trace, evsel, sample))) { 3291 if (trace->libtraceevent_print) { 3292 event_format__fprintf(tp_format, sample->cpu, 3293 sample->raw_data, sample->raw_size, 3294 trace->output); 3295 } else { 3296 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0); 3297 } 3298 } 3299 } 3300 3301 newline: 3302 fprintf(trace->output, ")\n"); 3303 3304 if (callchain_ret > 0) 3305 trace__fprintf_callchain(trace, sample); 3306 else if (callchain_ret < 0) 3307 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 3308 3309 ++trace->nr_events_printed; 3310 3311 if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) { 3312 evsel__disable(evsel); 3313 evsel__close(evsel); 3314 } 3315 out: 3316 thread__put(thread); 3317 return 0; 3318 } 3319 3320 static void print_location(FILE *f, struct perf_sample *sample, 3321 struct addr_location *al, 3322 bool print_dso, bool print_sym) 3323 { 3324 3325 if ((verbose > 0 || print_dso) && al->map) 3326 fprintf(f, "%s@", dso__long_name(map__dso(al->map))); 3327 3328 if ((verbose > 0 || print_sym) && al->sym) 3329 fprintf(f, "%s+0x%" PRIx64, al->sym->name, 3330 al->addr - al->sym->start); 3331 else if (al->map) 3332 fprintf(f, "0x%" PRIx64, al->addr); 3333 else 3334 fprintf(f, "0x%" PRIx64, sample->addr); 3335 } 3336 3337 static int trace__pgfault(struct trace *trace, 3338 struct evsel *evsel, 3339 union perf_event *event __maybe_unused, 3340 struct perf_sample *sample) 3341 { 3342 struct thread *thread; 3343 struct addr_location al; 3344 char map_type = 'd'; 3345 struct thread_trace *ttrace; 3346 int err = -1; 3347 int callchain_ret = 0; 3348 3349 addr_location__init(&al); 3350 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 3351 3352 if (sample->callchain) { 3353 struct callchain_cursor *cursor = get_tls_callchain_cursor(); 3354 3355 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); 3356 if (callchain_ret == 0) { 3357 if (cursor->nr < trace->min_stack) 3358 goto out_put; 3359 callchain_ret = 1; 3360 } 3361 } 3362 3363 ttrace = thread__trace(thread, trace); 3364 if (ttrace == NULL) 3365 goto out_put; 3366 3367 if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ) { 3368 ttrace->pfmaj++; 3369 trace->pfmaj++; 3370 } else { 3371 ttrace->pfmin++; 3372 trace->pfmin++; 3373 } 3374 3375 if (trace->summary_only) 3376 goto out; 3377 3378 thread__find_symbol(thread, sample->cpumode, sample->ip, &al); 3379 3380 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output); 3381 3382 fprintf(trace->output, "%sfault [", 3383 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ? 
3384 "maj" : "min"); 3385 3386 print_location(trace->output, sample, &al, false, true); 3387 3388 fprintf(trace->output, "] => "); 3389 3390 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); 3391 3392 if (!al.map) { 3393 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); 3394 3395 if (al.map) 3396 map_type = 'x'; 3397 else 3398 map_type = '?'; 3399 } 3400 3401 print_location(trace->output, sample, &al, true, false); 3402 3403 fprintf(trace->output, " (%c%c)\n", map_type, al.level); 3404 3405 if (callchain_ret > 0) 3406 trace__fprintf_callchain(trace, sample); 3407 else if (callchain_ret < 0) 3408 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 3409 3410 ++trace->nr_events_printed; 3411 out: 3412 err = 0; 3413 out_put: 3414 thread__put(thread); 3415 addr_location__exit(&al); 3416 return err; 3417 } 3418 3419 static void trace__set_base_time(struct trace *trace, 3420 struct evsel *evsel, 3421 struct perf_sample *sample) 3422 { 3423 /* 3424 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust 3425 * and don't use sample->time unconditionally, we may end up having 3426 * some other event in the future without PERF_SAMPLE_TIME for good 3427 * reason, i.e. we may not be interested in its timestamps, just in 3428 * it taking place, picking some piece of information when it 3429 * appears in our event stream (vfs_getname comes to mind). 3430 */ 3431 if (trace->base_time == 0 && !trace->full_time && 3432 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME)) 3433 trace->base_time = sample->time; 3434 } 3435 3436 static int trace__process_sample(const struct perf_tool *tool, 3437 union perf_event *event, 3438 struct perf_sample *sample, 3439 struct evsel *evsel, 3440 struct machine *machine __maybe_unused) 3441 { 3442 struct trace *trace = container_of(tool, struct trace, tool); 3443 struct thread *thread; 3444 int err = 0; 3445 3446 tracepoint_handler handler = evsel->handler; 3447 3448 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 3449 if (thread && thread__is_filtered(thread)) 3450 goto out; 3451 3452 trace__set_base_time(trace, evsel, sample); 3453 3454 if (handler) { 3455 ++trace->nr_events; 3456 handler(trace, evsel, event, sample); 3457 } 3458 out: 3459 thread__put(thread); 3460 return err; 3461 } 3462 3463 static int trace__record(struct trace *trace, int argc, const char **argv) 3464 { 3465 unsigned int rec_argc, i, j; 3466 const char **rec_argv; 3467 const char * const record_args[] = { 3468 "record", 3469 "-R", 3470 "-m", "1024", 3471 "-c", "1", 3472 }; 3473 pid_t pid = getpid(); 3474 char *filter = asprintf__tp_filter_pids(1, &pid); 3475 const char * const sc_args[] = { "-e", }; 3476 unsigned int sc_args_nr = ARRAY_SIZE(sc_args); 3477 const char * const majpf_args[] = { "-e", "major-faults" }; 3478 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args); 3479 const char * const minpf_args[] = { "-e", "minor-faults" }; 3480 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args); 3481 int err = -1; 3482 3483 /* +3 is for the event string below and the pid filter */ 3484 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 + 3485 majpf_args_nr + minpf_args_nr + argc; 3486 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 3487 3488 if (rec_argv == NULL || filter == NULL) 3489 goto out_free; 3490 3491 j = 0; 3492 for (i = 0; i < ARRAY_SIZE(record_args); i++) 3493 rec_argv[j++] = record_args[i]; 3494 3495 if (trace->trace_syscalls) { 3496 for (i = 0; i < sc_args_nr; i++) 3497 rec_argv[j++] = sc_args[i]; 3498 
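		/*
		 * With the defaults on a current kernel the argv assembled here
		 * ends up roughly as (the tp filter string is illustrative):
		 *
		 *	perf record -R -m 1024 -c 1 \
		 *		-e raw_syscalls:sys_enter,raw_syscalls:sys_exit \
		 *		--filter "common_pid != <perf's own pid>" \
		 *		[-e major-faults] [-e minor-faults] <workload argv>
		 */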
3499 /* event string may be different for older kernels - e.g., RHEL6 */ 3500 if (is_valid_tracepoint("raw_syscalls:sys_enter")) 3501 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit"; 3502 else if (is_valid_tracepoint("syscalls:sys_enter")) 3503 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit"; 3504 else { 3505 pr_err("Neither raw_syscalls nor syscalls events exist.\n"); 3506 goto out_free; 3507 } 3508 } 3509 3510 rec_argv[j++] = "--filter"; 3511 rec_argv[j++] = filter; 3512 3513 if (trace->trace_pgfaults & TRACE_PFMAJ) 3514 for (i = 0; i < majpf_args_nr; i++) 3515 rec_argv[j++] = majpf_args[i]; 3516 3517 if (trace->trace_pgfaults & TRACE_PFMIN) 3518 for (i = 0; i < minpf_args_nr; i++) 3519 rec_argv[j++] = minpf_args[i]; 3520 3521 for (i = 0; i < (unsigned int)argc; i++) 3522 rec_argv[j++] = argv[i]; 3523 3524 err = cmd_record(j, rec_argv); 3525 out_free: 3526 free(filter); 3527 free(rec_argv); 3528 return err; 3529 } 3530 3531 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp); 3532 static size_t trace__fprintf_total_summary(struct trace *trace, FILE *fp); 3533 3534 static bool evlist__add_vfs_getname(struct evlist *evlist) 3535 { 3536 bool found = false; 3537 struct evsel *evsel, *tmp; 3538 struct parse_events_error err; 3539 int ret; 3540 3541 parse_events_error__init(&err); 3542 ret = parse_events(evlist, "probe:vfs_getname*", &err); 3543 parse_events_error__exit(&err); 3544 if (ret) 3545 return false; 3546 3547 evlist__for_each_entry_safe(evlist, evsel, tmp) { 3548 if (!strstarts(evsel__name(evsel), "probe:vfs_getname")) 3549 continue; 3550 3551 if (evsel__field(evsel, "pathname")) { 3552 evsel->handler = trace__vfs_getname; 3553 found = true; 3554 continue; 3555 } 3556 3557 list_del_init(&evsel->core.node); 3558 evsel->evlist = NULL; 3559 evsel__delete(evsel); 3560 } 3561 3562 return found; 3563 } 3564 3565 static struct evsel *evsel__new_pgfault(u64 config) 3566 { 3567 struct evsel *evsel; 3568 struct perf_event_attr attr = { 3569 .type = PERF_TYPE_SOFTWARE, 3570 .mmap_data = 1, 3571 }; 3572 3573 attr.config = config; 3574 attr.sample_period = 1; 3575 3576 event_attr_init(&attr); 3577 3578 evsel = evsel__new(&attr); 3579 if (evsel) 3580 evsel->handler = trace__pgfault; 3581 3582 return evsel; 3583 } 3584 3585 static void evlist__free_syscall_tp_fields(struct evlist *evlist) 3586 { 3587 struct evsel *evsel; 3588 3589 evlist__for_each_entry(evlist, evsel) { 3590 evsel_trace__delete(evsel->priv); 3591 evsel->priv = NULL; 3592 } 3593 } 3594 3595 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample) 3596 { 3597 const u32 type = event->header.type; 3598 struct evsel *evsel; 3599 3600 if (type != PERF_RECORD_SAMPLE) { 3601 trace__process_event(trace, trace->host, event, sample); 3602 return; 3603 } 3604 3605 evsel = evlist__id2evsel(trace->evlist, sample->id); 3606 if (evsel == NULL) { 3607 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id); 3608 return; 3609 } 3610 3611 if (evswitch__discard(&trace->evswitch, evsel)) 3612 return; 3613 3614 trace__set_base_time(trace, evsel, sample); 3615 3616 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && 3617 sample->raw_data == NULL) { 3618 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n", 3619 evsel__name(evsel), sample->tid, 3620 sample->cpu, sample->raw_size); 3621 } else { 3622 tracepoint_handler handler = evsel->handler; 3623 handler(trace, evsel, event, sample); 3624 } 
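	/*
	 * The evsel->handler indirection above is what fans samples out to the
	 * per-event handlers: raw_syscalls:sys_enter/sys_exit are wired to
	 * trace__sys_enter()/trace__sys_exit() in trace__add_syscall_newtp(),
	 * probe:vfs_getname* to trace__vfs_getname() in evlist__add_vfs_getname(),
	 * the page fault software events to trace__pgfault() in
	 * evsel__new_pgfault(), and other tracepoints added via --event are
	 * typically routed to trace__event_handler().
	 */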
3625 3626 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX) 3627 interrupted = true; 3628 } 3629 3630 static int trace__add_syscall_newtp(struct trace *trace) 3631 { 3632 int ret = -1; 3633 struct evlist *evlist = trace->evlist; 3634 struct evsel *sys_enter, *sys_exit; 3635 3636 sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter); 3637 if (sys_enter == NULL) 3638 goto out; 3639 3640 if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args)) 3641 goto out_delete_sys_enter; 3642 3643 sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit); 3644 if (sys_exit == NULL) 3645 goto out_delete_sys_enter; 3646 3647 if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret)) 3648 goto out_delete_sys_exit; 3649 3650 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param); 3651 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param); 3652 3653 evlist__add(evlist, sys_enter); 3654 evlist__add(evlist, sys_exit); 3655 3656 if (callchain_param.enabled && !trace->kernel_syscallchains) { 3657 /* 3658 * We're interested only in the user space callchain 3659 * leading to the syscall, allow overriding that for 3660 * debugging reasons using --kernel_syscall_callchains 3661 */ 3662 sys_exit->core.attr.exclude_callchain_kernel = 1; 3663 } 3664 3665 trace->syscalls.events.sys_enter = sys_enter; 3666 trace->syscalls.events.sys_exit = sys_exit; 3667 3668 ret = 0; 3669 out: 3670 return ret; 3671 3672 out_delete_sys_exit: 3673 evsel__delete_priv(sys_exit); 3674 out_delete_sys_enter: 3675 evsel__delete_priv(sys_enter); 3676 goto out; 3677 } 3678 3679 static int trace__set_ev_qualifier_tp_filter(struct trace *trace) 3680 { 3681 int err = -1; 3682 struct evsel *sys_exit; 3683 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier, 3684 trace->ev_qualifier_ids.nr, 3685 trace->ev_qualifier_ids.entries); 3686 3687 if (filter == NULL) 3688 goto out_enomem; 3689 3690 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) { 3691 sys_exit = trace->syscalls.events.sys_exit; 3692 err = evsel__append_tp_filter(sys_exit, filter); 3693 } 3694 3695 free(filter); 3696 out: 3697 return err; 3698 out_enomem: 3699 errno = ENOMEM; 3700 goto out; 3701 } 3702 3703 #ifdef HAVE_BPF_SKEL 3704 static int syscall_arg_fmt__cache_btf_struct(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type) 3705 { 3706 int id; 3707 3708 if (arg_fmt->type != NULL) 3709 return -1; 3710 3711 id = btf__find_by_name(btf, type); 3712 if (id < 0) 3713 return -1; 3714 3715 arg_fmt->type = btf__type_by_id(btf, id); 3716 arg_fmt->type_id = id; 3717 3718 return 0; 3719 } 3720 3721 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name) 3722 { 3723 struct bpf_program *pos, *prog = NULL; 3724 const char *sec_name; 3725 3726 if (trace->skel->obj == NULL) 3727 return NULL; 3728 3729 bpf_object__for_each_program(pos, trace->skel->obj) { 3730 sec_name = bpf_program__section_name(pos); 3731 if (sec_name && !strcmp(sec_name, name)) { 3732 prog = pos; 3733 break; 3734 } 3735 } 3736 3737 return prog; 3738 } 3739 3740 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc, 3741 const char *prog_name, const char *type) 3742 { 3743 struct bpf_program *prog; 3744 3745 if (prog_name == NULL) { 3746 char default_prog_name[256]; 3747 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name); 3748 prog = trace__find_bpf_program_by_title(trace, 
default_prog_name); 3749 if (prog != NULL) 3750 goto out_found; 3751 if (sc->fmt && sc->fmt->alias) { 3752 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias); 3753 prog = trace__find_bpf_program_by_title(trace, default_prog_name); 3754 if (prog != NULL) 3755 goto out_found; 3756 } 3757 goto out_unaugmented; 3758 } 3759 3760 prog = trace__find_bpf_program_by_title(trace, prog_name); 3761 3762 if (prog != NULL) { 3763 out_found: 3764 return prog; 3765 } 3766 3767 pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n", 3768 prog_name, type, sc->name); 3769 out_unaugmented: 3770 return trace->skel->progs.syscall_unaugmented; 3771 } 3772 3773 static void trace__init_syscall_bpf_progs(struct trace *trace, int e_machine, int id) 3774 { 3775 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id); 3776 3777 if (sc == NULL) 3778 return; 3779 3780 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter"); 3781 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit"); 3782 } 3783 3784 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int e_machine, int id) 3785 { 3786 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id); 3787 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented); 3788 } 3789 3790 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int e_machine, int id) 3791 { 3792 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id); 3793 return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented); 3794 } 3795 3796 static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int e_machine, int key, unsigned int *beauty_array) 3797 { 3798 struct tep_format_field *field; 3799 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, key); 3800 const struct btf_type *bt; 3801 char *struct_offset, *tmp, name[32]; 3802 bool can_augment = false; 3803 int i, cnt; 3804 3805 if (sc == NULL) 3806 return -1; 3807 3808 trace__load_vmlinux_btf(trace); 3809 if (trace->btf == NULL) 3810 return -1; 3811 3812 for (i = 0, field = sc->args; field; ++i, field = field->next) { 3813 // XXX We're only collecting pointer payloads _from_ user space 3814 if (!sc->arg_fmt[i].from_user) 3815 continue; 3816 3817 struct_offset = strstr(field->type, "struct "); 3818 if (struct_offset == NULL) 3819 struct_offset = strstr(field->type, "union "); 3820 else 3821 struct_offset++; // "union" is shorter 3822 3823 if (field->flags & TEP_FIELD_IS_POINTER && struct_offset) { /* struct or union (think BPF's attr arg) */ 3824 struct_offset += 6; 3825 3826 /* for 'struct foo *', we only want 'foo' */ 3827 for (tmp = struct_offset, cnt = 0; *tmp != ' ' && *tmp != '\0'; ++tmp, ++cnt) { 3828 } 3829 3830 strncpy(name, struct_offset, cnt); 3831 name[cnt] = '\0'; 3832 3833 /* cache struct's btf_type and type_id */ 3834 if (syscall_arg_fmt__cache_btf_struct(&sc->arg_fmt[i], trace->btf, name)) 3835 continue; 3836 3837 bt = sc->arg_fmt[i].type; 3838 beauty_array[i] = bt->size; 3839 can_augment = true; 3840 } else if (field->flags & TEP_FIELD_IS_POINTER && /* string */ 3841 strcmp(field->type, "const char *") == 0 && 3842 (strstr(field->name, "name") || 3843 strstr(field->name, "path") || 3844 strstr(field->name, "file") || 3845 strstr(field->name, 
"root") || 3846 strstr(field->name, "key") || 3847 strstr(field->name, "special") || 3848 strstr(field->name, "type") || 3849 strstr(field->name, "description"))) { 3850 beauty_array[i] = 1; 3851 can_augment = true; 3852 } else if (field->flags & TEP_FIELD_IS_POINTER && /* buffer */ 3853 strstr(field->type, "char *") && 3854 (strstr(field->name, "buf") || 3855 strstr(field->name, "val") || 3856 strstr(field->name, "msg"))) { 3857 int j; 3858 struct tep_format_field *field_tmp; 3859 3860 /* find the size of the buffer that appears in pairs with buf */ 3861 for (j = 0, field_tmp = sc->args; field_tmp; ++j, field_tmp = field_tmp->next) { 3862 if (!(field_tmp->flags & TEP_FIELD_IS_POINTER) && /* only integers */ 3863 (strstr(field_tmp->name, "count") || 3864 strstr(field_tmp->name, "siz") || /* size, bufsiz */ 3865 (strstr(field_tmp->name, "len") && strcmp(field_tmp->name, "filename")))) { 3866 /* filename's got 'len' in it, we don't want that */ 3867 beauty_array[i] = -(j + 1); 3868 can_augment = true; 3869 break; 3870 } 3871 } 3872 } 3873 } 3874 3875 if (can_augment) 3876 return 0; 3877 3878 return -1; 3879 } 3880 3881 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, 3882 struct syscall *sc) 3883 { 3884 struct tep_format_field *field, *candidate_field; 3885 /* 3886 * We're only interested in syscalls that have a pointer: 3887 */ 3888 for (field = sc->args; field; field = field->next) { 3889 if (field->flags & TEP_FIELD_IS_POINTER) 3890 goto try_to_find_pair; 3891 } 3892 3893 return NULL; 3894 3895 try_to_find_pair: 3896 for (int i = 0, num_idx = syscalltbl__num_idx(sc->e_machine); i < num_idx; ++i) { 3897 int id = syscalltbl__id_at_idx(sc->e_machine, i); 3898 struct syscall *pair = trace__syscall_info(trace, NULL, sc->e_machine, id); 3899 struct bpf_program *pair_prog; 3900 bool is_candidate = false; 3901 3902 if (pair == NULL || pair->id == sc->id || 3903 pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented) 3904 continue; 3905 3906 for (field = sc->args, candidate_field = pair->args; 3907 field && candidate_field; field = field->next, candidate_field = candidate_field->next) { 3908 bool is_pointer = field->flags & TEP_FIELD_IS_POINTER, 3909 candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER; 3910 3911 if (is_pointer) { 3912 if (!candidate_is_pointer) { 3913 // The candidate just doesn't copies our pointer arg, might copy other pointers we want. 3914 continue; 3915 } 3916 } else { 3917 if (candidate_is_pointer) { 3918 // The candidate might copy a pointer we don't have, skip it. 3919 goto next_candidate; 3920 } 3921 continue; 3922 } 3923 3924 if (strcmp(field->type, candidate_field->type)) 3925 goto next_candidate; 3926 3927 /* 3928 * This is limited in the BPF program but sys_write 3929 * uses "const char *" for its "buf" arg so we need to 3930 * use some heuristic that is kinda future proof... 3931 */ 3932 if (strcmp(field->type, "const char *") == 0 && 3933 !(strstr(field->name, "name") || 3934 strstr(field->name, "path") || 3935 strstr(field->name, "file") || 3936 strstr(field->name, "root") || 3937 strstr(field->name, "description"))) 3938 goto next_candidate; 3939 3940 is_candidate = true; 3941 } 3942 3943 if (!is_candidate) 3944 goto next_candidate; 3945 3946 /* 3947 * Check if the tentative pair syscall augmenter has more pointers, if it has, 3948 * then it may be collecting that and we then can't use it, as it would collect 3949 * more than what is common to the two syscalls. 
3950 */ 3951 if (candidate_field) { 3952 for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next) 3953 if (candidate_field->flags & TEP_FIELD_IS_POINTER) 3954 goto next_candidate; 3955 } 3956 3957 pair_prog = pair->bpf_prog.sys_enter; 3958 /* 3959 * If the pair isn't enabled, then its bpf_prog.sys_enter will not 3960 * have been searched for, so search it here and if it returns the 3961 * unaugmented one, then ignore it, otherwise we'll reuse that BPF 3962 * program for a filtered syscall on a non-filtered one. 3963 * 3964 * For instance, we have "!syscalls:sys_enter_renameat" and that is 3965 * useful for "renameat2". 3966 */ 3967 if (pair_prog == NULL) { 3968 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter"); 3969 if (pair_prog == trace->skel->progs.syscall_unaugmented) 3970 goto next_candidate; 3971 } 3972 3973 pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, 3974 sc->name); 3975 return pair_prog; 3976 next_candidate: 3977 continue; 3978 } 3979 3980 return NULL; 3981 } 3982 3983 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace, int e_machine) 3984 { 3985 int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter); 3986 int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit); 3987 int beauty_map_fd = bpf_map__fd(trace->skel->maps.beauty_map_enter); 3988 int err = 0; 3989 unsigned int beauty_array[6]; 3990 3991 for (int i = 0, num_idx = syscalltbl__num_idx(e_machine); i < num_idx; ++i) { 3992 int prog_fd, key = syscalltbl__id_at_idx(e_machine, i); 3993 3994 if (!trace__syscall_enabled(trace, key)) 3995 continue; 3996 3997 trace__init_syscall_bpf_progs(trace, e_machine, key); 3998 3999 // It'll get at least the "!raw_syscalls:unaugmented" 4000 prog_fd = trace__bpf_prog_sys_enter_fd(trace, e_machine, key); 4001 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY); 4002 if (err) 4003 break; 4004 prog_fd = trace__bpf_prog_sys_exit_fd(trace, e_machine, key); 4005 err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY); 4006 if (err) 4007 break; 4008 4009 /* use beauty_map to tell BPF how many bytes to collect, set beauty_map's value here */ 4010 memset(beauty_array, 0, sizeof(beauty_array)); 4011 err = trace__bpf_sys_enter_beauty_map(trace, e_machine, key, (unsigned int *)beauty_array); 4012 if (err) 4013 continue; 4014 err = bpf_map_update_elem(beauty_map_fd, &key, beauty_array, BPF_ANY); 4015 if (err) 4016 break; 4017 } 4018 4019 /* 4020 * Now lets do a second pass looking for enabled syscalls without 4021 * an augmenter that have a signature that is a superset of another 4022 * syscall with an augmenter so that we can auto-reuse it. 4023 * 4024 * I.e. if we have an augmenter for the "open" syscall that has 4025 * this signature: 4026 * 4027 * int open(const char *pathname, int flags, mode_t mode); 4028 * 4029 * I.e. that will collect just the first string argument, then we 4030 * can reuse it for the 'creat' syscall, that has this signature: 4031 * 4032 * int creat(const char *pathname, mode_t mode); 4033 * 4034 * and for: 4035 * 4036 * int stat(const char *pathname, struct stat *statbuf); 4037 * int lstat(const char *pathname, struct stat *statbuf); 4038 * 4039 * Because the 'open' augmenter will collect the first arg as a string, 4040 * and leave alone all the other args, which already helps with 4041 * beautifying 'stat' and 'lstat''s pathname arg. 
4042 * 4043 * Then, in time, when 'stat' gets an augmenter that collects both 4044 * first and second arg (this one on the raw_syscalls:sys_exit prog 4045 * array tail call, then that one will be used. 4046 */ 4047 for (int i = 0, num_idx = syscalltbl__num_idx(e_machine); i < num_idx; ++i) { 4048 int key = syscalltbl__id_at_idx(e_machine, i); 4049 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, key); 4050 struct bpf_program *pair_prog; 4051 int prog_fd; 4052 4053 if (sc == NULL || sc->bpf_prog.sys_enter == NULL) 4054 continue; 4055 4056 /* 4057 * For now we're just reusing the sys_enter prog, and if it 4058 * already has an augmenter, we don't need to find one. 4059 */ 4060 if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented) 4061 continue; 4062 4063 /* 4064 * Look at all the other syscalls for one that has a signature 4065 * that is close enough that we can share: 4066 */ 4067 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc); 4068 if (pair_prog == NULL) 4069 continue; 4070 4071 sc->bpf_prog.sys_enter = pair_prog; 4072 4073 /* 4074 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter 4075 * with the fd for the program we're reusing: 4076 */ 4077 prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter); 4078 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY); 4079 if (err) 4080 break; 4081 } 4082 4083 return err; 4084 } 4085 #endif // HAVE_BPF_SKEL 4086 4087 static int trace__set_ev_qualifier_filter(struct trace *trace) 4088 { 4089 if (trace->syscalls.events.sys_enter) 4090 return trace__set_ev_qualifier_tp_filter(trace); 4091 return 0; 4092 } 4093 4094 static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused, 4095 size_t npids __maybe_unused, pid_t *pids __maybe_unused) 4096 { 4097 int err = 0; 4098 #ifdef HAVE_LIBBPF_SUPPORT 4099 bool value = true; 4100 int map_fd = bpf_map__fd(map); 4101 size_t i; 4102 4103 for (i = 0; i < npids; ++i) { 4104 err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY); 4105 if (err) 4106 break; 4107 } 4108 #endif 4109 return err; 4110 } 4111 4112 static int trace__set_filter_loop_pids(struct trace *trace) 4113 { 4114 unsigned int nr = 1, err; 4115 pid_t pids[32] = { 4116 getpid(), 4117 }; 4118 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]); 4119 4120 while (thread && nr < ARRAY_SIZE(pids)) { 4121 struct thread *parent = machine__find_thread(trace->host, 4122 thread__ppid(thread), 4123 thread__ppid(thread)); 4124 4125 if (parent == NULL) 4126 break; 4127 4128 if (!strcmp(thread__comm_str(parent), "sshd") || 4129 strstarts(thread__comm_str(parent), "gnome-terminal")) { 4130 pids[nr++] = thread__tid(parent); 4131 thread__put(parent); 4132 break; 4133 } 4134 thread__put(thread); 4135 thread = parent; 4136 } 4137 thread__put(thread); 4138 4139 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids); 4140 if (!err && trace->filter_pids.map) 4141 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids); 4142 4143 return err; 4144 } 4145 4146 static int trace__set_filter_pids(struct trace *trace) 4147 { 4148 int err = 0; 4149 /* 4150 * Better not use !target__has_task() here because we need to cover the 4151 * case where no threads were specified in the command line, but a 4152 * workload was, and in that case we will fill in the thread_map when 4153 * we fork the workload in evlist__prepare_workload. 
4154 */ 4155 if (trace->filter_pids.nr > 0) { 4156 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr, 4157 trace->filter_pids.entries); 4158 if (!err && trace->filter_pids.map) { 4159 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr, 4160 trace->filter_pids.entries); 4161 } 4162 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) { 4163 err = trace__set_filter_loop_pids(trace); 4164 } 4165 4166 return err; 4167 } 4168 4169 static int __trace__deliver_event(struct trace *trace, union perf_event *event) 4170 { 4171 struct evlist *evlist = trace->evlist; 4172 struct perf_sample sample; 4173 int err; 4174 4175 perf_sample__init(&sample, /*all=*/false); 4176 err = evlist__parse_sample(evlist, event, &sample); 4177 if (err) 4178 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err); 4179 else 4180 trace__handle_event(trace, event, &sample); 4181 4182 perf_sample__exit(&sample); 4183 return 0; 4184 } 4185 4186 static int __trace__flush_events(struct trace *trace) 4187 { 4188 u64 first = ordered_events__first_time(&trace->oe.data); 4189 u64 flush = trace->oe.last - NSEC_PER_SEC; 4190 4191 /* Is there some thing to flush.. */ 4192 if (first && first < flush) 4193 return ordered_events__flush_time(&trace->oe.data, flush); 4194 4195 return 0; 4196 } 4197 4198 static int trace__flush_events(struct trace *trace) 4199 { 4200 return !trace->sort_events ? 0 : __trace__flush_events(trace); 4201 } 4202 4203 static int trace__deliver_event(struct trace *trace, union perf_event *event) 4204 { 4205 int err; 4206 4207 if (!trace->sort_events) 4208 return __trace__deliver_event(trace, event); 4209 4210 err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last); 4211 if (err && err != -1) 4212 return err; 4213 4214 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL); 4215 if (err) 4216 return err; 4217 4218 return trace__flush_events(trace); 4219 } 4220 4221 static int ordered_events__deliver_event(struct ordered_events *oe, 4222 struct ordered_event *event) 4223 { 4224 struct trace *trace = container_of(oe, struct trace, oe.data); 4225 4226 return __trace__deliver_event(trace, event->event); 4227 } 4228 4229 static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg, 4230 char **type) 4231 { 4232 struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel); 4233 const struct tep_event *tp_format; 4234 4235 if (!fmt) 4236 return NULL; 4237 4238 tp_format = evsel__tp_format(evsel); 4239 if (!tp_format) 4240 return NULL; 4241 4242 for (const struct tep_format_field *field = tp_format->format.fields; field; 4243 field = field->next, ++fmt) { 4244 if (strcmp(field->name, arg) == 0) { 4245 *type = field->type; 4246 return fmt; 4247 } 4248 } 4249 4250 return NULL; 4251 } 4252 4253 static int trace__expand_filter(struct trace *trace, struct evsel *evsel) 4254 { 4255 char *tok, *left = evsel->filter, *new_filter = evsel->filter; 4256 4257 while ((tok = strpbrk(left, "=<>!")) != NULL) { 4258 char *right = tok + 1, *right_end; 4259 4260 if (*right == '=') 4261 ++right; 4262 4263 while (isspace(*right)) 4264 ++right; 4265 4266 if (*right == '\0') 4267 break; 4268 4269 while (!isalpha(*left)) 4270 if (++left == tok) { 4271 /* 4272 * Bail out, can't find the name of the argument that is being 4273 * used in the filter, let it try to set this filter, will fail later. 
4274 */ 4275 return 0; 4276 } 4277 4278 right_end = right + 1; 4279 while (isalnum(*right_end) || *right_end == '_' || *right_end == '|') 4280 ++right_end; 4281 4282 if (isalpha(*right)) { 4283 struct syscall_arg_fmt *fmt; 4284 int left_size = tok - left, 4285 right_size = right_end - right; 4286 char arg[128], *type; 4287 4288 while (isspace(left[left_size - 1])) 4289 --left_size; 4290 4291 scnprintf(arg, sizeof(arg), "%.*s", left_size, left); 4292 4293 fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg, &type); 4294 if (fmt == NULL) { 4295 pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n", 4296 arg, evsel->name, evsel->filter); 4297 return -1; 4298 } 4299 4300 pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ", 4301 arg, (int)(right - tok), tok, right_size, right); 4302 4303 if (fmt->strtoul) { 4304 u64 val; 4305 struct syscall_arg syscall_arg = { 4306 .trace = trace, 4307 .fmt = fmt, 4308 .type_name = type, 4309 .parm = fmt->parm, 4310 }; 4311 4312 if (fmt->strtoul(right, right_size, &syscall_arg, &val)) { 4313 char *n, expansion[19]; 4314 int expansion_lenght = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val); 4315 int expansion_offset = right - new_filter; 4316 4317 pr_debug("%s", expansion); 4318 4319 if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) { 4320 pr_debug(" out of memory!\n"); 4321 free(new_filter); 4322 return -1; 4323 } 4324 if (new_filter != evsel->filter) 4325 free(new_filter); 4326 left = n + expansion_offset + expansion_lenght; 4327 new_filter = n; 4328 } else { 4329 pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n", 4330 right_size, right, arg, evsel->name, evsel->filter); 4331 return -1; 4332 } 4333 } else { 4334 pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n", 4335 arg, evsel->name, evsel->filter); 4336 return -1; 4337 } 4338 4339 pr_debug("\n"); 4340 } else { 4341 left = right_end; 4342 } 4343 } 4344 4345 if (new_filter != evsel->filter) { 4346 pr_debug("New filter for %s: %s\n", evsel->name, new_filter); 4347 evsel__set_filter(evsel, new_filter); 4348 free(new_filter); 4349 } 4350 4351 return 0; 4352 } 4353 4354 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel) 4355 { 4356 struct evlist *evlist = trace->evlist; 4357 struct evsel *evsel; 4358 4359 evlist__for_each_entry(evlist, evsel) { 4360 if (evsel->filter == NULL) 4361 continue; 4362 4363 if (trace__expand_filter(trace, evsel)) { 4364 *err_evsel = evsel; 4365 return -1; 4366 } 4367 } 4368 4369 return 0; 4370 } 4371 4372 static int trace__run(struct trace *trace, int argc, const char **argv) 4373 { 4374 struct evlist *evlist = trace->evlist; 4375 struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL; 4376 int err = -1, i; 4377 unsigned long before; 4378 const bool forks = argc > 0; 4379 bool draining = false; 4380 4381 trace->live = true; 4382 4383 if (trace->summary_bpf) { 4384 if (trace_prepare_bpf_summary(trace->summary_mode) < 0) 4385 goto out_delete_evlist; 4386 4387 if (trace->summary_only) 4388 goto create_maps; 4389 } 4390 4391 if (!trace->raw_augmented_syscalls) { 4392 if (trace->trace_syscalls && trace__add_syscall_newtp(trace)) 4393 goto out_error_raw_syscalls; 4394 4395 if (trace->trace_syscalls) 4396 trace->vfs_getname = evlist__add_vfs_getname(evlist); 4397 } 4398 4399 if ((trace->trace_pgfaults & TRACE_PFMAJ)) { 4400 pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ); 4401 if (pgfault_maj == NULL) 4402 goto out_error_mem; 
4403 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param); 4404 evlist__add(evlist, pgfault_maj); 4405 } 4406 4407 if ((trace->trace_pgfaults & TRACE_PFMIN)) { 4408 pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN); 4409 if (pgfault_min == NULL) 4410 goto out_error_mem; 4411 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param); 4412 evlist__add(evlist, pgfault_min); 4413 } 4414 4415 /* Enable ignoring missing threads when -u/-p option is defined. */ 4416 trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid; 4417 4418 if (trace->sched && 4419 evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime)) 4420 goto out_error_sched_stat_runtime; 4421 /* 4422 * If a global cgroup was set, apply it to all the events without an 4423 * explicit cgroup. I.e.: 4424 * 4425 * trace -G A -e sched:*switch 4426 * 4427 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc 4428 * _and_ sched:sched_switch to the 'A' cgroup, while: 4429 * 4430 * trace -e sched:*switch -G A 4431 * 4432 * will only set the sched:sched_switch event to the 'A' cgroup, all the 4433 * other events (raw_syscalls:sys_{enter,exit}, etc are left "without" 4434 * a cgroup (on the root cgroup, sys wide, etc). 4435 * 4436 * Multiple cgroups: 4437 * 4438 * trace -G A -e sched:*switch -G B 4439 * 4440 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes 4441 * to the 'B' cgroup. 4442 * 4443 * evlist__set_default_cgroup() grabs a reference of the passed cgroup 4444 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL. 4445 */ 4446 if (trace->cgroup) 4447 evlist__set_default_cgroup(trace->evlist, trace->cgroup); 4448 4449 create_maps: 4450 err = evlist__create_maps(evlist, &trace->opts.target); 4451 if (err < 0) { 4452 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n"); 4453 goto out_delete_evlist; 4454 } 4455 4456 err = trace__symbols_init(trace, evlist); 4457 if (err < 0) { 4458 fprintf(trace->output, "Problems initializing symbol libraries!\n"); 4459 goto out_delete_evlist; 4460 } 4461 4462 if (trace->summary_mode == SUMMARY__BY_TOTAL && !trace->summary_bpf) { 4463 trace->syscall_stats = alloc_syscall_stats(); 4464 if (trace->syscall_stats == NULL) 4465 goto out_delete_evlist; 4466 } 4467 4468 evlist__config(evlist, &trace->opts, &callchain_param); 4469 4470 if (forks) { 4471 err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL); 4472 if (err < 0) { 4473 fprintf(trace->output, "Couldn't run the workload!\n"); 4474 goto out_delete_evlist; 4475 } 4476 workload_pid = evlist->workload.pid; 4477 } 4478 4479 err = evlist__open(evlist); 4480 if (err < 0) 4481 goto out_error_open; 4482 #ifdef HAVE_BPF_SKEL 4483 if (trace->syscalls.events.bpf_output) { 4484 struct perf_cpu cpu; 4485 4486 /* 4487 * Set up the __augmented_syscalls__ BPF map to hold for each 4488 * CPU the bpf-output event's file descriptor. 
4489 */ 4490 perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) { 4491 int mycpu = cpu.cpu; 4492 4493 bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__, 4494 &mycpu, sizeof(mycpu), 4495 xyarray__entry(trace->syscalls.events.bpf_output->core.fd, 4496 mycpu, 0), 4497 sizeof(__u32), BPF_ANY); 4498 } 4499 } 4500 4501 if (trace->skel) 4502 trace->filter_pids.map = trace->skel->maps.pids_filtered; 4503 #endif 4504 err = trace__set_filter_pids(trace); 4505 if (err < 0) 4506 goto out_error_mem; 4507 4508 #ifdef HAVE_BPF_SKEL 4509 if (trace->skel && trace->skel->progs.sys_enter) { 4510 /* 4511 * TODO: Initialize for all host binary machine types, not just 4512 * those matching the perf binary. 4513 */ 4514 trace__init_syscalls_bpf_prog_array_maps(trace, EM_HOST); 4515 } 4516 #endif 4517 4518 if (trace->ev_qualifier_ids.nr > 0) { 4519 err = trace__set_ev_qualifier_filter(trace); 4520 if (err < 0) 4521 goto out_errno; 4522 4523 if (trace->syscalls.events.sys_exit) { 4524 pr_debug("event qualifier tracepoint filter: %s\n", 4525 trace->syscalls.events.sys_exit->filter); 4526 } 4527 } 4528 4529 /* 4530 * If the "close" syscall is not traced, then we will not have the 4531 * opportunity to, in syscall_arg__scnprintf_close_fd() invalidate the 4532 * fd->pathname table and were ending up showing the last value set by 4533 * syscalls opening a pathname and associating it with a descriptor or 4534 * reading it from /proc/pid/fd/ in cases where that doesn't make 4535 * sense. 4536 * 4537 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is 4538 * not in use. 4539 */ 4540 /* TODO: support for more than just perf binary machine type close. */ 4541 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(EM_HOST, "close")); 4542 4543 err = trace__expand_filters(trace, &evsel); 4544 if (err) 4545 goto out_delete_evlist; 4546 err = evlist__apply_filters(evlist, &evsel, &trace->opts.target); 4547 if (err < 0) 4548 goto out_error_apply_filters; 4549 4550 if (!trace->summary_only || !trace->summary_bpf) { 4551 err = evlist__mmap(evlist, trace->opts.mmap_pages); 4552 if (err < 0) 4553 goto out_error_mmap; 4554 } 4555 4556 if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay) 4557 evlist__enable(evlist); 4558 4559 if (forks) 4560 evlist__start_workload(evlist); 4561 4562 if (trace->opts.target.initial_delay) { 4563 usleep(trace->opts.target.initial_delay * 1000); 4564 evlist__enable(evlist); 4565 } 4566 4567 if (trace->summary_bpf) 4568 trace_start_bpf_summary(); 4569 4570 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 || 4571 perf_thread_map__nr(evlist->core.threads) > 1 || 4572 evlist__first(evlist)->core.attr.inherit; 4573 4574 /* 4575 * Now that we already used evsel->core.attr to ask the kernel to setup the 4576 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in 4577 * trace__resolve_callchain(), allowing per-event max-stack settings 4578 * to override an explicitly set --max-stack global setting. 
4579 */ 4580 evlist__for_each_entry(evlist, evsel) { 4581 if (evsel__has_callchain(evsel) && 4582 evsel->core.attr.sample_max_stack == 0) 4583 evsel->core.attr.sample_max_stack = trace->max_stack; 4584 } 4585 again: 4586 before = trace->nr_events; 4587 4588 for (i = 0; i < evlist->core.nr_mmaps; i++) { 4589 union perf_event *event; 4590 struct mmap *md; 4591 4592 md = &evlist->mmap[i]; 4593 if (perf_mmap__read_init(&md->core) < 0) 4594 continue; 4595 4596 while ((event = perf_mmap__read_event(&md->core)) != NULL) { 4597 ++trace->nr_events; 4598 4599 err = trace__deliver_event(trace, event); 4600 if (err) 4601 goto out_disable; 4602 4603 perf_mmap__consume(&md->core); 4604 4605 if (interrupted) 4606 goto out_disable; 4607 4608 if (done && !draining) { 4609 evlist__disable(evlist); 4610 draining = true; 4611 } 4612 } 4613 perf_mmap__read_done(&md->core); 4614 } 4615 4616 if (trace->nr_events == before) { 4617 int timeout = done ? 100 : -1; 4618 4619 if (!draining && evlist__poll(evlist, timeout) > 0) { 4620 if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0) 4621 draining = true; 4622 4623 goto again; 4624 } else { 4625 if (trace__flush_events(trace)) 4626 goto out_disable; 4627 } 4628 } else { 4629 goto again; 4630 } 4631 4632 out_disable: 4633 thread__zput(trace->current); 4634 4635 evlist__disable(evlist); 4636 4637 if (trace->summary_bpf) 4638 trace_end_bpf_summary(); 4639 4640 if (trace->sort_events) 4641 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL); 4642 4643 if (!err) { 4644 if (trace->summary) { 4645 if (trace->summary_bpf) 4646 trace_print_bpf_summary(trace->output); 4647 else if (trace->summary_mode == SUMMARY__BY_TOTAL) 4648 trace__fprintf_total_summary(trace, trace->output); 4649 else 4650 trace__fprintf_thread_summary(trace, trace->output); 4651 } 4652 4653 if (trace->show_tool_stats) { 4654 fprintf(trace->output, "Stats:\n " 4655 " vfs_getname : %" PRIu64 "\n" 4656 " proc_getname: %" PRIu64 "\n", 4657 trace->stats.vfs_getname, 4658 trace->stats.proc_getname); 4659 } 4660 } 4661 4662 out_delete_evlist: 4663 trace_cleanup_bpf_summary(); 4664 delete_syscall_stats(trace->syscall_stats); 4665 trace__symbols__exit(trace); 4666 evlist__free_syscall_tp_fields(evlist); 4667 evlist__delete(evlist); 4668 cgroup__put(trace->cgroup); 4669 trace->evlist = NULL; 4670 trace->live = false; 4671 return err; 4672 { 4673 char errbuf[BUFSIZ]; 4674 4675 out_error_sched_stat_runtime: 4676 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime"); 4677 goto out_error; 4678 4679 out_error_raw_syscalls: 4680 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)"); 4681 goto out_error; 4682 4683 out_error_mmap: 4684 evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf)); 4685 goto out_error; 4686 4687 out_error_open: 4688 evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf)); 4689 4690 out_error: 4691 fprintf(trace->output, "%s\n", errbuf); 4692 goto out_delete_evlist; 4693 4694 out_error_apply_filters: 4695 fprintf(trace->output, 4696 "Failed to set filter \"%s\" on event %s with %d (%s)\n", 4697 evsel->filter, evsel__name(evsel), errno, 4698 str_error_r(errno, errbuf, sizeof(errbuf))); 4699 goto out_delete_evlist; 4700 } 4701 out_error_mem: 4702 fprintf(trace->output, "Not enough memory to run!\n"); 4703 goto out_delete_evlist; 4704 4705 out_errno: 4706 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno)); 4707 goto out_delete_evlist; 4708 } 4709 4710 static int 
trace__replay(struct trace *trace) 4711 { 4712 const struct evsel_str_handler handlers[] = { 4713 { "probe:vfs_getname", trace__vfs_getname, }, 4714 }; 4715 struct perf_data data = { 4716 .path = input_name, 4717 .mode = PERF_DATA_MODE_READ, 4718 .force = trace->force, 4719 }; 4720 struct perf_session *session; 4721 struct evsel *evsel; 4722 int err = -1; 4723 4724 perf_tool__init(&trace->tool, /*ordered_events=*/true); 4725 trace->tool.sample = trace__process_sample; 4726 trace->tool.mmap = perf_event__process_mmap; 4727 trace->tool.mmap2 = perf_event__process_mmap2; 4728 trace->tool.comm = perf_event__process_comm; 4729 trace->tool.exit = perf_event__process_exit; 4730 trace->tool.fork = perf_event__process_fork; 4731 trace->tool.attr = perf_event__process_attr; 4732 trace->tool.tracing_data = perf_event__process_tracing_data; 4733 trace->tool.build_id = perf_event__process_build_id; 4734 trace->tool.namespaces = perf_event__process_namespaces; 4735 4736 trace->tool.ordered_events = true; 4737 trace->tool.ordering_requires_timestamps = true; 4738 4739 /* add tid to output */ 4740 trace->multiple_threads = true; 4741 4742 session = perf_session__new(&data, &trace->tool); 4743 if (IS_ERR(session)) 4744 return PTR_ERR(session); 4745 4746 if (trace->opts.target.pid) 4747 symbol_conf.pid_list_str = strdup(trace->opts.target.pid); 4748 4749 if (trace->opts.target.tid) 4750 symbol_conf.tid_list_str = strdup(trace->opts.target.tid); 4751 4752 if (symbol__init(&session->header.env) < 0) 4753 goto out; 4754 4755 trace->host = &session->machines.host; 4756 4757 err = perf_session__set_tracepoints_handlers(session, handlers); 4758 if (err) 4759 goto out; 4760 4761 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter"); 4762 trace->syscalls.events.sys_enter = evsel; 4763 /* older kernels have syscalls tp versus raw_syscalls */ 4764 if (evsel == NULL) 4765 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter"); 4766 4767 if (evsel && 4768 (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 || 4769 perf_evsel__init_sc_tp_ptr_field(evsel, args))) { 4770 pr_err("Error during initialize raw_syscalls:sys_enter event\n"); 4771 goto out; 4772 } 4773 4774 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit"); 4775 trace->syscalls.events.sys_exit = evsel; 4776 if (evsel == NULL) 4777 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit"); 4778 if (evsel && 4779 (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 || 4780 perf_evsel__init_sc_tp_uint_field(evsel, ret))) { 4781 pr_err("Error during initialize raw_syscalls:sys_exit event\n"); 4782 goto out; 4783 } 4784 4785 evlist__for_each_entry(session->evlist, evsel) { 4786 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE && 4787 (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ || 4788 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN || 4789 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS)) 4790 evsel->handler = trace__pgfault; 4791 } 4792 4793 if (trace->summary_mode == SUMMARY__BY_TOTAL) { 4794 trace->syscall_stats = alloc_syscall_stats(); 4795 if (trace->syscall_stats == NULL) 4796 goto out; 4797 } 4798 4799 setup_pager(); 4800 4801 err = perf_session__process_events(session); 4802 if (err) 4803 pr_err("Failed to process events, error %d", err); 4804 4805 else if (trace->summary) 4806 trace__fprintf_thread_summary(trace, trace->output); 4807 4808 out: 4809 delete_syscall_stats(trace->syscall_stats); 4810 
perf_session__delete(session); 4811 4812 return err; 4813 } 4814 4815 static size_t trace__fprintf_summary_header(FILE *fp) 4816 { 4817 size_t printed; 4818 4819 printed = fprintf(fp, "\n Summary of events:\n\n"); 4820 4821 return printed; 4822 } 4823 4824 struct syscall_entry { 4825 struct syscall_stats *stats; 4826 double msecs; 4827 int syscall; 4828 }; 4829 4830 static int entry_cmp(const void *e1, const void *e2) 4831 { 4832 const struct syscall_entry *entry1 = e1; 4833 const struct syscall_entry *entry2 = e2; 4834 4835 return entry1->msecs > entry2->msecs ? -1 : 1; 4836 } 4837 4838 static struct syscall_entry *syscall__sort_stats(struct hashmap *syscall_stats) 4839 { 4840 struct syscall_entry *entry; 4841 struct hashmap_entry *pos; 4842 unsigned bkt, i, nr; 4843 4844 nr = syscall_stats->sz; 4845 entry = malloc(nr * sizeof(*entry)); 4846 if (entry == NULL) 4847 return NULL; 4848 4849 i = 0; 4850 hashmap__for_each_entry(syscall_stats, pos, bkt) { 4851 struct syscall_stats *ss = pos->pvalue; 4852 struct stats *st = &ss->stats; 4853 4854 entry[i].stats = ss; 4855 entry[i].msecs = (u64)st->n * (avg_stats(st) / NSEC_PER_MSEC); 4856 entry[i].syscall = pos->key; 4857 i++; 4858 } 4859 assert(i == nr); 4860 4861 qsort(entry, nr, sizeof(*entry), entry_cmp); 4862 return entry; 4863 } 4864 4865 static size_t syscall__dump_stats(struct trace *trace, int e_machine, FILE *fp, 4866 struct hashmap *syscall_stats) 4867 { 4868 size_t printed = 0; 4869 struct syscall *sc; 4870 struct syscall_entry *entries; 4871 4872 entries = syscall__sort_stats(syscall_stats); 4873 if (entries == NULL) 4874 return 0; 4875 4876 printed += fprintf(fp, "\n"); 4877 4878 printed += fprintf(fp, " syscall calls errors total min avg max stddev\n"); 4879 printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n"); 4880 printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n"); 4881 4882 for (size_t i = 0; i < syscall_stats->sz; i++) { 4883 struct syscall_entry *entry = &entries[i]; 4884 struct syscall_stats *stats = entry->stats; 4885 4886 if (stats) { 4887 double min = (double)(stats->stats.min) / NSEC_PER_MSEC; 4888 double max = (double)(stats->stats.max) / NSEC_PER_MSEC; 4889 double avg = avg_stats(&stats->stats); 4890 double pct; 4891 u64 n = (u64)stats->stats.n; 4892 4893 pct = avg ? 
100.0 * stddev_stats(&stats->stats) / avg : 0.0; 4894 avg /= NSEC_PER_MSEC; 4895 4896 sc = trace__syscall_info(trace, /*evsel=*/NULL, e_machine, entry->syscall); 4897 if (!sc) 4898 continue; 4899 4900 printed += fprintf(fp, " %-15s", sc->name); 4901 printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f", 4902 n, stats->nr_failures, entry->msecs, min, avg); 4903 printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct); 4904 4905 if (trace->errno_summary && stats->nr_failures) { 4906 int e; 4907 4908 for (e = 0; e < stats->max_errno; ++e) { 4909 if (stats->errnos[e] != 0) 4910 fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]); 4911 } 4912 } 4913 } 4914 } 4915 4916 free(entries); 4917 printed += fprintf(fp, "\n\n"); 4918 4919 return printed; 4920 } 4921 4922 static size_t thread__dump_stats(struct thread_trace *ttrace, 4923 struct trace *trace, int e_machine, FILE *fp) 4924 { 4925 return syscall__dump_stats(trace, e_machine, fp, ttrace->syscall_stats); 4926 } 4927 4928 static size_t system__dump_stats(struct trace *trace, int e_machine, FILE *fp) 4929 { 4930 return syscall__dump_stats(trace, e_machine, fp, trace->syscall_stats); 4931 } 4932 4933 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace) 4934 { 4935 size_t printed = 0; 4936 struct thread_trace *ttrace = thread__priv(thread); 4937 int e_machine = thread__e_machine(thread, trace->host); 4938 double ratio; 4939 4940 if (ttrace == NULL) 4941 return 0; 4942 4943 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0; 4944 4945 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread__tid(thread)); 4946 printed += fprintf(fp, "%lu events, ", ttrace->nr_events); 4947 printed += fprintf(fp, "%.1f%%", ratio); 4948 if (ttrace->pfmaj) 4949 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj); 4950 if (ttrace->pfmin) 4951 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin); 4952 if (trace->sched) 4953 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms); 4954 else if (fputc('\n', fp) != EOF) 4955 ++printed; 4956 4957 printed += thread__dump_stats(ttrace, trace, e_machine, fp); 4958 4959 return printed; 4960 } 4961 4962 static unsigned long thread__nr_events(struct thread_trace *ttrace) 4963 { 4964 return ttrace ? ttrace->nr_events : 0; 4965 } 4966 4967 static int trace_nr_events_cmp(void *priv __maybe_unused, 4968 const struct list_head *la, 4969 const struct list_head *lb) 4970 { 4971 struct thread_list *a = list_entry(la, struct thread_list, list); 4972 struct thread_list *b = list_entry(lb, struct thread_list, list); 4973 unsigned long a_nr_events = thread__nr_events(thread__priv(a->thread)); 4974 unsigned long b_nr_events = thread__nr_events(thread__priv(b->thread)); 4975 4976 if (a_nr_events != b_nr_events) 4977 return a_nr_events < b_nr_events ? -1 : 1; 4978 4979 /* Identical number of threads, place smaller tids first. */ 4980 return thread__tid(a->thread) < thread__tid(b->thread) 4981 ? -1 4982 : (thread__tid(a->thread) > thread__tid(b->thread) ? 
1 : 0); 4983 } 4984 4985 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp) 4986 { 4987 size_t printed = trace__fprintf_summary_header(fp); 4988 LIST_HEAD(threads); 4989 4990 if (machine__thread_list(trace->host, &threads) == 0) { 4991 struct thread_list *pos; 4992 4993 list_sort(NULL, &threads, trace_nr_events_cmp); 4994 4995 list_for_each_entry(pos, &threads, list) 4996 printed += trace__fprintf_thread(fp, pos->thread, trace); 4997 } 4998 thread_list__delete(&threads); 4999 return printed; 5000 } 5001 5002 static size_t trace__fprintf_total_summary(struct trace *trace, FILE *fp) 5003 { 5004 size_t printed = trace__fprintf_summary_header(fp); 5005 5006 printed += fprintf(fp, " total, "); 5007 printed += fprintf(fp, "%lu events", trace->nr_events); 5008 5009 if (trace->pfmaj) 5010 printed += fprintf(fp, ", %lu majfaults", trace->pfmaj); 5011 if (trace->pfmin) 5012 printed += fprintf(fp, ", %lu minfaults", trace->pfmin); 5013 if (trace->sched) 5014 printed += fprintf(fp, ", %.3f msec\n", trace->runtime_ms); 5015 else if (fputc('\n', fp) != EOF) 5016 ++printed; 5017 5018 /* TODO: get all system e_machines. */ 5019 printed += system__dump_stats(trace, EM_HOST, fp); 5020 5021 return printed; 5022 } 5023 5024 static int trace__set_duration(const struct option *opt, const char *str, 5025 int unset __maybe_unused) 5026 { 5027 struct trace *trace = opt->value; 5028 5029 trace->duration_filter = atof(str); 5030 return 0; 5031 } 5032 5033 static int trace__set_filter_pids_from_option(const struct option *opt, const char *str, 5034 int unset __maybe_unused) 5035 { 5036 int ret = -1; 5037 size_t i; 5038 struct trace *trace = opt->value; 5039 /* 5040 * FIXME: introduce a intarray class, plain parse csv and create a 5041 * { int nr, int entries[] } struct... 5042 */ 5043 struct intlist *list = intlist__new(str); 5044 5045 if (list == NULL) 5046 return -1; 5047 5048 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1; 5049 trace->filter_pids.entries = calloc(i, sizeof(pid_t)); 5050 5051 if (trace->filter_pids.entries == NULL) 5052 goto out; 5053 5054 trace->filter_pids.entries[0] = getpid(); 5055 5056 for (i = 1; i < trace->filter_pids.nr; ++i) 5057 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i; 5058 5059 intlist__delete(list); 5060 ret = 0; 5061 out: 5062 return ret; 5063 } 5064 5065 static int trace__open_output(struct trace *trace, const char *filename) 5066 { 5067 struct stat st; 5068 5069 if (!stat(filename, &st) && st.st_size) { 5070 char oldname[PATH_MAX]; 5071 5072 scnprintf(oldname, sizeof(oldname), "%s.old", filename); 5073 unlink(oldname); 5074 rename(filename, oldname); 5075 } 5076 5077 trace->output = fopen(filename, "w"); 5078 5079 return trace->output == NULL ? 
-errno : 0; 5080 } 5081 5082 static int parse_pagefaults(const struct option *opt, const char *str, 5083 int unset __maybe_unused) 5084 { 5085 int *trace_pgfaults = opt->value; 5086 5087 if (strcmp(str, "all") == 0) 5088 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN; 5089 else if (strcmp(str, "maj") == 0) 5090 *trace_pgfaults |= TRACE_PFMAJ; 5091 else if (strcmp(str, "min") == 0) 5092 *trace_pgfaults |= TRACE_PFMIN; 5093 else 5094 return -1; 5095 5096 return 0; 5097 } 5098 5099 static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler) 5100 { 5101 struct evsel *evsel; 5102 5103 evlist__for_each_entry(evlist, evsel) { 5104 if (evsel->handler == NULL) 5105 evsel->handler = handler; 5106 } 5107 } 5108 5109 static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name) 5110 { 5111 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel); 5112 5113 if (fmt) { 5114 const struct syscall_fmt *scfmt = syscall_fmt__find(name); 5115 5116 if (scfmt) { 5117 const struct tep_event *tp_format = evsel__tp_format(evsel); 5118 5119 if (tp_format) { 5120 int skip = 0; 5121 5122 if (strcmp(tp_format->format.fields->name, "__syscall_nr") == 0 || 5123 strcmp(tp_format->format.fields->name, "nr") == 0) 5124 ++skip; 5125 5126 memcpy(fmt + skip, scfmt->arg, 5127 (tp_format->format.nr_fields - skip) * sizeof(*fmt)); 5128 } 5129 } 5130 } 5131 } 5132 5133 static int evlist__set_syscall_tp_fields(struct evlist *evlist, bool *use_btf) 5134 { 5135 struct evsel *evsel; 5136 5137 evlist__for_each_entry(evlist, evsel) { 5138 const struct tep_event *tp_format; 5139 5140 if (evsel->priv) 5141 continue; 5142 5143 tp_format = evsel__tp_format(evsel); 5144 if (!tp_format) 5145 continue; 5146 5147 if (strcmp(tp_format->system, "syscalls")) { 5148 evsel__init_tp_arg_scnprintf(evsel, use_btf); 5149 continue; 5150 } 5151 5152 if (evsel__init_syscall_tp(evsel)) 5153 return -1; 5154 5155 if (!strncmp(tp_format->name, "sys_enter_", 10)) { 5156 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 5157 5158 if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64))) 5159 return -1; 5160 5161 evsel__set_syscall_arg_fmt(evsel, 5162 tp_format->name + sizeof("sys_enter_") - 1); 5163 } else if (!strncmp(tp_format->name, "sys_exit_", 9)) { 5164 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 5165 5166 if (__tp_field__init_uint(&sc->ret, sizeof(u64), 5167 sc->id.offset + sizeof(u64), 5168 evsel->needs_swap)) 5169 return -1; 5170 5171 evsel__set_syscall_arg_fmt(evsel, 5172 tp_format->name + sizeof("sys_exit_") - 1); 5173 } 5174 } 5175 5176 return 0; 5177 } 5178 5179 /* 5180 * XXX: Hackish, just splitting the combined -e+--event (syscalls 5181 * (raw_syscalls:{sys_{enter,exit}} + events (tracepoints, HW, SW, etc) to use 5182 * existing facilities unchanged (trace->ev_qualifier + parse_options()). 5183 * 5184 * It'd be better to introduce a parse_options() variant that would return a 5185 * list with the terms it didn't match to an event... 
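 *
 * E.g. "perf trace -e open*,sched:sched_switch" splits into "open*" going to
 * the strace-like syscall qualifier and "sched:sched_switch" being handed to
 * parse_events_option().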
5186 */ 5187 static int trace__parse_events_option(const struct option *opt, const char *str, 5188 int unset __maybe_unused) 5189 { 5190 struct trace *trace = (struct trace *)opt->value; 5191 const char *s = str; 5192 char *sep = NULL, *lists[2] = { NULL, NULL, }; 5193 int len = strlen(str) + 1, err = -1, list, idx; 5194 char *strace_groups_dir = system_path(STRACE_GROUPS_DIR); 5195 char group_name[PATH_MAX]; 5196 const struct syscall_fmt *fmt; 5197 5198 if (strace_groups_dir == NULL) 5199 return -1; 5200 5201 if (*s == '!') { 5202 ++s; 5203 trace->not_ev_qualifier = true; 5204 } 5205 5206 while (1) { 5207 if ((sep = strchr(s, ',')) != NULL) 5208 *sep = '\0'; 5209 5210 list = 0; 5211 /* TODO: support for more than just perf binary machine type syscalls. */ 5212 if (syscalltbl__id(EM_HOST, s) >= 0 || 5213 syscalltbl__strglobmatch_first(EM_HOST, s, &idx) >= 0) { 5214 list = 1; 5215 goto do_concat; 5216 } 5217 5218 fmt = syscall_fmt__find_by_alias(s); 5219 if (fmt != NULL) { 5220 list = 1; 5221 s = fmt->name; 5222 } else { 5223 path__join(group_name, sizeof(group_name), strace_groups_dir, s); 5224 if (access(group_name, R_OK) == 0) 5225 list = 1; 5226 } 5227 do_concat: 5228 if (lists[list]) { 5229 sprintf(lists[list] + strlen(lists[list]), ",%s", s); 5230 } else { 5231 lists[list] = malloc(len); 5232 if (lists[list] == NULL) 5233 goto out; 5234 strcpy(lists[list], s); 5235 } 5236 5237 if (!sep) 5238 break; 5239 5240 *sep = ','; 5241 s = sep + 1; 5242 } 5243 5244 if (lists[1] != NULL) { 5245 struct strlist_config slist_config = { 5246 .dirname = strace_groups_dir, 5247 }; 5248 5249 trace->ev_qualifier = strlist__new(lists[1], &slist_config); 5250 if (trace->ev_qualifier == NULL) { 5251 fputs("Not enough memory to parse event qualifier", trace->output); 5252 goto out; 5253 } 5254 5255 if (trace__validate_ev_qualifier(trace)) 5256 goto out; 5257 trace->trace_syscalls = true; 5258 } 5259 5260 err = 0; 5261 5262 if (lists[0]) { 5263 struct parse_events_option_args parse_events_option_args = { 5264 .evlistp = &trace->evlist, 5265 }; 5266 struct option o = { 5267 .value = &parse_events_option_args, 5268 }; 5269 err = parse_events_option(&o, lists[0], 0); 5270 } 5271 out: 5272 free(strace_groups_dir); 5273 free(lists[0]); 5274 free(lists[1]); 5275 if (sep) 5276 *sep = ','; 5277 5278 return err; 5279 } 5280 5281 static int trace__parse_cgroups(const struct option *opt, const char *str, int unset) 5282 { 5283 struct trace *trace = opt->value; 5284 5285 if (!list_empty(&trace->evlist->core.entries)) { 5286 struct option o = { 5287 .value = &trace->evlist, 5288 }; 5289 return parse_cgroups(&o, str, unset); 5290 } 5291 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str); 5292 5293 return 0; 5294 } 5295 5296 static int trace__parse_summary_mode(const struct option *opt, const char *str, 5297 int unset __maybe_unused) 5298 { 5299 struct trace *trace = opt->value; 5300 5301 if (!strcmp(str, "thread")) { 5302 trace->summary_mode = SUMMARY__BY_THREAD; 5303 } else if (!strcmp(str, "total")) { 5304 trace->summary_mode = SUMMARY__BY_TOTAL; 5305 } else if (!strcmp(str, "cgroup")) { 5306 trace->summary_mode = SUMMARY__BY_CGROUP; 5307 } else { 5308 pr_err("Unknown summary mode: %s\n", str); 5309 return -1; 5310 } 5311 5312 return 0; 5313 } 5314 5315 static int trace__config(const char *var, const char *value, void *arg) 5316 { 5317 struct trace *trace = arg; 5318 int err = 0; 5319 5320 if (!strcmp(var, "trace.add_events")) { 5321 trace->perfconfig_events = strdup(value); 5322 if (trace->perfconfig_events == 
NULL) { 5323 pr_err("Not enough memory for %s\n", "trace.add_events"); 5324 return -1; 5325 } 5326 } else if (!strcmp(var, "trace.show_timestamp")) { 5327 trace->show_tstamp = perf_config_bool(var, value); 5328 } else if (!strcmp(var, "trace.show_duration")) { 5329 trace->show_duration = perf_config_bool(var, value); 5330 } else if (!strcmp(var, "trace.show_arg_names")) { 5331 trace->show_arg_names = perf_config_bool(var, value); 5332 if (!trace->show_arg_names) 5333 trace->show_zeros = true; 5334 } else if (!strcmp(var, "trace.show_zeros")) { 5335 bool new_show_zeros = perf_config_bool(var, value); 5336 if (!trace->show_arg_names && !new_show_zeros) { 5337 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n"); 5338 goto out; 5339 } 5340 trace->show_zeros = new_show_zeros; 5341 } else if (!strcmp(var, "trace.show_prefix")) { 5342 trace->show_string_prefix = perf_config_bool(var, value); 5343 } else if (!strcmp(var, "trace.no_inherit")) { 5344 trace->opts.no_inherit = perf_config_bool(var, value); 5345 } else if (!strcmp(var, "trace.args_alignment")) { 5346 int args_alignment = 0; 5347 if (perf_config_int(&args_alignment, var, value) == 0) 5348 trace->args_alignment = args_alignment; 5349 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) { 5350 if (strcasecmp(value, "libtraceevent") == 0) 5351 trace->libtraceevent_print = true; 5352 else if (strcasecmp(value, "libbeauty") == 0) 5353 trace->libtraceevent_print = false; 5354 } 5355 out: 5356 return err; 5357 } 5358 5359 static void trace__exit(struct trace *trace) 5360 { 5361 strlist__delete(trace->ev_qualifier); 5362 zfree(&trace->ev_qualifier_ids.entries); 5363 if (trace->syscalls.table) { 5364 for (size_t i = 0; i < trace->syscalls.table_size; i++) 5365 syscall__delete(trace->syscalls.table[i]); 5366 zfree(&trace->syscalls.table); 5367 } 5368 zfree(&trace->perfconfig_events); 5369 evlist__delete(trace->evlist); 5370 trace->evlist = NULL; 5371 #ifdef HAVE_LIBBPF_SUPPORT 5372 btf__free(trace->btf); 5373 trace->btf = NULL; 5374 #endif 5375 } 5376 5377 #ifdef HAVE_BPF_SKEL 5378 static int bpf__setup_bpf_output(struct evlist *evlist) 5379 { 5380 int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/"); 5381 5382 if (err) 5383 pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n"); 5384 5385 return err; 5386 } 5387 #endif 5388 5389 int cmd_trace(int argc, const char **argv) 5390 { 5391 const char *trace_usage[] = { 5392 "perf trace [<options>] [<command>]", 5393 "perf trace [<options>] -- <command> [<options>]", 5394 "perf trace record [<options>] [<command>]", 5395 "perf trace record [<options>] -- <command> [<options>]", 5396 NULL 5397 }; 5398 struct trace trace = { 5399 .opts = { 5400 .target = { 5401 .uid = UINT_MAX, 5402 .uses_mmap = true, 5403 }, 5404 .user_freq = UINT_MAX, 5405 .user_interval = ULLONG_MAX, 5406 .no_buffering = true, 5407 .mmap_pages = UINT_MAX, 5408 }, 5409 .output = stderr, 5410 .show_comm = true, 5411 .show_tstamp = true, 5412 .show_duration = true, 5413 .show_arg_names = true, 5414 .args_alignment = 70, 5415 .trace_syscalls = false, 5416 .kernel_syscallchains = false, 5417 .max_stack = UINT_MAX, 5418 .max_events = ULONG_MAX, 5419 }; 5420 const char *output_name = NULL; 5421 const struct option trace_options[] = { 5422 OPT_CALLBACK('e', "event", &trace, "event", 5423 "event/syscall selector. 
use 'perf list' to list available events", 5424 trace__parse_events_option), 5425 OPT_CALLBACK(0, "filter", &trace.evlist, "filter", 5426 "event filter", parse_filter), 5427 OPT_BOOLEAN(0, "comm", &trace.show_comm, 5428 "show the thread COMM next to its id"), 5429 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"), 5430 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace", 5431 trace__parse_events_option), 5432 OPT_STRING('o', "output", &output_name, "file", "output file name"), 5433 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"), 5434 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid", 5435 "trace events on existing process id"), 5436 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid", 5437 "trace events on existing thread id"), 5438 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids", 5439 "pids to filter (by the kernel)", trace__set_filter_pids_from_option), 5440 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide, 5441 "system-wide collection from all CPUs"), 5442 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu", 5443 "list of cpus to monitor"), 5444 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit, 5445 "child tasks do not inherit counters"), 5446 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages", 5447 "number of mmap data pages", evlist__parse_mmap_pages), 5448 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user", 5449 "user to profile"), 5450 OPT_CALLBACK(0, "duration", &trace, "float", 5451 "show only events with duration > N.M ms", 5452 trace__set_duration), 5453 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"), 5454 OPT_INCR('v', "verbose", &verbose, "be more verbose"), 5455 OPT_BOOLEAN('T', "time", &trace.full_time, 5456 "Show full timestamp, not time relative to first start"), 5457 OPT_BOOLEAN(0, "failure", &trace.failure_only, 5458 "Show only syscalls that failed"), 5459 OPT_BOOLEAN('s', "summary", &trace.summary_only, 5460 "Show only syscall summary with statistics"), 5461 OPT_BOOLEAN('S', "with-summary", &trace.summary, 5462 "Show all syscalls and summary with statistics"), 5463 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary, 5464 "Show errno stats per syscall, use with -s or -S"), 5465 OPT_CALLBACK(0, "summary-mode", &trace, "mode", 5466 "How to show summary: select thread (default), total or cgroup", 5467 trace__parse_summary_mode), 5468 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min", 5469 "Trace pagefaults", parse_pagefaults, "maj"), 5470 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"), 5471 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"), 5472 OPT_CALLBACK(0, "call-graph", &trace.opts, 5473 "record_mode[,record_size]", record_callchain_help, 5474 &record_parse_callchain_opt), 5475 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print, 5476 "Use libtraceevent to print the tracepoint arguments."), 5477 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains, 5478 "Show the kernel callchains on the syscall exit path"), 5479 OPT_ULONG(0, "max-events", &trace.max_events, 5480 "Set the maximum number of events to print, exit after that is reached. 
"), 5481 OPT_UINTEGER(0, "min-stack", &trace.min_stack, 5482 "Set the minimum stack depth when parsing the callchain, " 5483 "anything below the specified depth will be ignored."), 5484 OPT_UINTEGER(0, "max-stack", &trace.max_stack, 5485 "Set the maximum stack depth when parsing the callchain, " 5486 "anything beyond the specified depth will be ignored. " 5487 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)), 5488 OPT_BOOLEAN(0, "sort-events", &trace.sort_events, 5489 "Sort batch of events before processing, use if getting out of order events"), 5490 OPT_BOOLEAN(0, "print-sample", &trace.print_sample, 5491 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"), 5492 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout, 5493 "per thread proc mmap processing timeout in ms"), 5494 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only", 5495 trace__parse_cgroups), 5496 OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay, 5497 "ms to wait before starting measurement after program " 5498 "start"), 5499 OPT_BOOLEAN(0, "force-btf", &trace.force_btf, "Prefer btf_dump general pretty printer" 5500 "to customized ones"), 5501 OPT_BOOLEAN(0, "bpf-summary", &trace.summary_bpf, "Summary syscall stats in BPF"), 5502 OPTS_EVSWITCH(&trace.evswitch), 5503 OPT_END() 5504 }; 5505 bool __maybe_unused max_stack_user_set = true; 5506 bool mmap_pages_user_set = true; 5507 struct evsel *evsel; 5508 const char * const trace_subcommands[] = { "record", NULL }; 5509 int err = -1; 5510 char bf[BUFSIZ]; 5511 struct sigaction sigchld_act; 5512 5513 signal(SIGSEGV, sighandler_dump_stack); 5514 signal(SIGFPE, sighandler_dump_stack); 5515 signal(SIGINT, sighandler_interrupt); 5516 5517 memset(&sigchld_act, 0, sizeof(sigchld_act)); 5518 sigchld_act.sa_flags = SA_SIGINFO; 5519 sigchld_act.sa_sigaction = sighandler_chld; 5520 sigaction(SIGCHLD, &sigchld_act, NULL); 5521 5522 trace.evlist = evlist__new(); 5523 5524 if (trace.evlist == NULL) { 5525 pr_err("Not enough memory to run!\n"); 5526 err = -ENOMEM; 5527 goto out; 5528 } 5529 5530 /* 5531 * Parsing .perfconfig may entail creating a BPF event, that may need 5532 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting 5533 * is too small. This affects just this process, not touching the 5534 * global setting. If it fails we'll get something in 'perf trace -v' 5535 * to help diagnose the problem. 5536 */ 5537 rlimit__bump_memlock(); 5538 5539 err = perf_config(trace__config, &trace); 5540 if (err) 5541 goto out; 5542 5543 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands, 5544 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION); 5545 5546 /* 5547 * Here we already passed thru trace__parse_events_option() and it has 5548 * already figured out if -e syscall_name, if not but if --event 5549 * foo:bar was used, the user is interested _just_ in those, say, 5550 * tracepoint events, not in the strace-like syscall-name-based mode. 5551 * 5552 * This is important because we need to check if strace-like mode is 5553 * needed to decided if we should filter out the eBPF 5554 * __augmented_syscalls__ code, if it is in the mix, say, via 5555 * .perfconfig trace.add_events, and filter those out. 5556 */ 5557 if (!trace.trace_syscalls && !trace.trace_pgfaults && 5558 trace.evlist->core.nr_entries == 0 /* Was --events used? 
	/*
	 * Now that we have --verbose figured out, let's see if we need to parse
	 * events from .perfconfig, so that if those events fail parsing, say some
	 * BPF program fails, then we'll be able to use --verbose to see what went
	 * wrong in more detail.
	 */
	if (trace.perfconfig_events != NULL) {
		struct parse_events_error parse_err;

		parse_events_error__init(&parse_err);
		err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
		if (err)
			parse_events_error__print(&parse_err, trace.perfconfig_events);
		parse_events_error__exit(&parse_err);
		if (err)
			goto out;
	}

	if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
		usage_with_options_msg(trace_usage, trace_options,
				       "cgroup monitoring only available in system-wide mode");
	}

#ifdef HAVE_BPF_SKEL
	if (!trace.trace_syscalls)
		goto skip_augmentation;

	if ((argc >= 1) && (strcmp(argv[0], "record") == 0)) {
		pr_debug("Syscall augmentation fails with record, disabling augmentation");
		goto skip_augmentation;
	}

	if (trace.summary_bpf) {
		if (!trace.opts.target.system_wide) {
			/* TODO: Add filters in the BPF to support other targets. */
			pr_err("Error: --bpf-summary only works for system-wide mode.\n");
			goto out;
		}
		if (trace.summary_only)
			goto skip_augmentation;
	}

	trace.skel = augmented_raw_syscalls_bpf__open();
	if (!trace.skel) {
		pr_debug("Failed to open augmented syscalls BPF skeleton");
	} else {
		/*
		 * Disable attaching the BPF programs except for sys_enter and
		 * sys_exit that tail call into this as necessary.
		 */
		struct bpf_program *prog;

		bpf_object__for_each_program(prog, trace.skel->obj) {
			if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit)
				bpf_program__set_autoattach(prog, /*autoattach=*/false);
		}

		err = augmented_raw_syscalls_bpf__load(trace.skel);

		if (err < 0) {
			libbpf_strerror(err, bf, sizeof(bf));
			pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf);
		} else {
			augmented_raw_syscalls_bpf__attach(trace.skel);
			trace__add_syscall_newtp(&trace);
		}
	}

	err = bpf__setup_bpf_output(trace.evlist);
	if (err) {
		libbpf_strerror(err, bf, sizeof(bf));
		pr_err("ERROR: Setup BPF output event failed: %s\n", bf);
		goto out;
	}
	trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
	assert(evsel__name_is(trace.syscalls.events.bpf_output, "__augmented_syscalls__"));
skip_augmentation:
#endif
	err = -1;

	if (trace.trace_pgfaults) {
		trace.opts.sample_address = true;
		trace.opts.sample_time = true;
	}

	if (trace.opts.mmap_pages == UINT_MAX)
		mmap_pages_user_set = false;

	if (trace.max_stack == UINT_MAX) {
		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
		max_stack_user_set = false;
	}
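
	/*
	 * Note: asking for --min-stack/--max-stack without an explicit
	 * --call-graph defaults to DWARF unwinding below, when perf was
	 * built with that support.
	 */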
#ifdef HAVE_DWARF_UNWIND_SUPPORT
	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
	}
#endif

	if (callchain_param.enabled) {
		if (!mmap_pages_user_set && geteuid() == 0)
			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;

		symbol_conf.use_callchain = true;
	}

	if (trace.evlist->core.nr_entries > 0) {
		bool use_btf = false;

		evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
		if (evlist__set_syscall_tp_fields(trace.evlist, &use_btf)) {
			perror("failed to set syscalls:* tracepoint fields");
			goto out;
		}

		if (use_btf)
			trace__load_vmlinux_btf(&trace);
	}

	if (trace.sort_events) {
		ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
		ordered_events__set_copy_on_queue(&trace.oe.data, true);
	}

	/*
	 * If we are augmenting syscalls, then combine what we put in the
	 * __augmented_syscalls__ BPF map with what is in the
	 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
	 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
	 *
	 * We'll switch to look at two BPF maps, one for sys_enter and the
	 * other for sys_exit when we start augmenting the sys_exit paths with
	 * buffers that are being copied from kernel to userspace, think 'read'
	 * syscall.
	 */
	if (trace.syscalls.events.bpf_output) {
		evlist__for_each_entry(trace.evlist, evsel) {
			bool raw_syscalls_sys_exit = evsel__name_is(evsel, "raw_syscalls:sys_exit");

			if (raw_syscalls_sys_exit) {
				trace.raw_augmented_syscalls = true;
				goto init_augmented_syscall_tp;
			}

			if (trace.syscalls.events.bpf_output->priv == NULL &&
			    strstr(evsel__name(evsel), "syscalls:sys_enter")) {
				struct evsel *augmented = trace.syscalls.events.bpf_output;
				if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
				    evsel__init_augmented_syscall_tp_args(augmented))
					goto out;
				/*
				 * Augmented is __augmented_syscalls__ BPF_OUTPUT event
				 * Above we made sure we can get from the payload the tp fields
				 * that we get from syscalls:sys_enter tracefs format file.
				 */
				augmented->handler = trace__sys_enter;
				/*
				 * Now we do the same for the *syscalls:sys_enter event so that
				 * if we handle it directly, i.e. if the BPF prog returns 0 so
				 * as not to filter it, then we'll handle it just like we would
				 * for the BPF_OUTPUT one:
				 */
				if (evsel__init_augmented_syscall_tp(evsel, evsel) ||
				    evsel__init_augmented_syscall_tp_args(evsel))
					goto out;
				evsel->handler = trace__sys_enter;
			}

			if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) {
				struct syscall_tp *sc;
init_augmented_syscall_tp:
				if (evsel__init_augmented_syscall_tp(evsel, evsel))
					goto out;
				sc = __evsel__syscall_tp(evsel);
				/*
				 * For now with BPF raw_augmented we hook into
				 * raw_syscalls:sys_enter and there we get all
				 * 6 syscall args plus the tracepoint common
				 * fields and the syscall_nr (another long).
				 * So we check if that is the case and if so
				 * don't look at sc->args_size but always at
				 * the full raw_syscalls:sys_enter payload,
				 * which is fixed.
				 *
				 * We'll revisit this later to pass
				 * sc->args_size to the BPF augmenter (now
				 * tools/perf/examples/bpf/augmented_raw_syscalls.c)
				 * so that it copies only what we need for each
				 * syscall, like what happens when we use
				 * syscalls:sys_enter_NAME, so that we reduce
				 * the kernel/userspace traffic to just what is
				 * needed for each syscall.
				 */
				if (trace.raw_augmented_syscalls)
					trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
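				/*
				 * Worked example (illustrative, not computed by the code): with
				 * the usual 8 bytes of common fields in raw_syscalls:sys_enter,
				 * sc->id.offset is 8, so on a 64-bit target this comes to
				 * (6 + 1) * 8 + 8 = 64 bytes, i.e. everything up to and
				 * including the 6 args.
				 */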
				evsel__init_augmented_syscall_tp_ret(evsel);
				evsel->handler = trace__sys_exit;
			}
		}
	}

	if ((argc >= 1) && (strcmp(argv[0], "record") == 0)) {
		err = trace__record(&trace, argc-1, &argv[1]);
		goto out;
	}

	/* Using just --errno-summary will trigger --summary */
	if (trace.errno_summary && !trace.summary && !trace.summary_only)
		trace.summary_only = true;

	/* summary_only implies summary option, but don't overwrite summary if set */
	if (trace.summary_only)
		trace.summary = trace.summary_only;

	/* Keep exited threads, otherwise information might be lost for summary */
	if (trace.summary) {
		symbol_conf.keep_exited_threads = true;
		if (trace.summary_mode == SUMMARY__NONE)
			trace.summary_mode = SUMMARY__BY_THREAD;

		if (!trace.summary_bpf && trace.summary_mode == SUMMARY__BY_CGROUP) {
			pr_err("Error: --summary-mode=cgroup only works with --bpf-summary\n");
			err = -EINVAL;
			goto out;
		}
	}

	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
		if (err < 0) {
			perror("failed to create output file");
			goto out;
		}
	}

	err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
	if (err)
		goto out_close;

	err = target__validate(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	err = target__parse_uid(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	if (!argc && target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;

	if (input_name)
		err = trace__replay(&trace);
	else
		err = trace__run(&trace, argc, argv);

out_close:
	if (output_name != NULL)
		fclose(trace.output);
out:
	trace__exit(&trace);
#ifdef HAVE_BPF_SKEL
	augmented_raw_syscalls_bpf__destroy(trace.skel);
#endif
	return err;
}