/*
 * builtin-trace.c
 *
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system wide, etc.  Default format is loosely strace like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 */

#include "util/record.h"
#include <api/fs/tracing_path.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#ifdef HAVE_BPF_SKEL
#include "bpf_skel/augmented_raw_syscalls.skel.h"
#endif
#endif
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/config.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/env.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/synthetic-events.h"
#include "util/evlist.h"
#include "util/evswitch.h"
#include "util/mmap.h"
#include <subcmd/pager.h>
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/tool.h"
#include "util/util.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/tracepoint.h"
#include "callchain.h"
#include "print_binary.h"
#include "string2.h"
#include "syscalltbl.h"
#include "rb_resort.h"
#include "../perf.h"
#include "trace_augment.h"

#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/list_sort.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <fcntl.h>
#include <sys/sysmacros.h>

#include <linux/ctype.h>
#include <perf/mmap.h>

#ifdef HAVE_LIBTRACEEVENT
#include <event-parse.h>
#endif

#ifndef O_CLOEXEC
# define O_CLOEXEC		02000000
#endif

#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE	1024
#endif

#define RAW_SYSCALL_ARGS_NUM	6

/*
 * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
 *
 * We have to explicitly mark the direction of the flow of data, i.e. whether
 * it goes from the kernel to user space or the other way around. Since the
 * BPF collector we have so far copies only from user to kernel space, mark
 * the arguments that go in that direction, so that we don't end up collecting
 * the previous contents of syscall args that go from kernel to user space.
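 *
 * For instance, the buffer passed to write() flows from user space to the
 * kernel and can be copied at sys_enter time (from_user = true), while the
 * buffer passed to read() is only filled in by the kernel, so copying it at
 * sys_enter would just collect whatever garbage was there before the call.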
 */
struct syscall_arg_fmt {
	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	bool	   (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
	void	   *parm;
	const char *name;
	u16	   nr_entries; // for arrays
	bool	   from_user;
	bool	   show_zero;
#ifdef HAVE_LIBBPF_SUPPORT
	const struct btf_type *type;
	int	   type_id; /* used in btf_dump */
#endif
};

struct syscall_fmt {
	const char *name;
	const char *alias;
	struct {
		const char *sys_enter,
			   *sys_exit;
	}	   bpf_prog_name;
	struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
	u8	   nr_args;
	bool	   errpid;
	bool	   timeout;
	bool	   hexret;
};

struct trace {
	struct perf_tool	tool;
	struct syscalltbl	*sctbl;
	struct {
		struct syscall	*table;
		struct {
			struct evsel *sys_enter,
				     *sys_exit,
				     *bpf_output;
		}		events;
	} syscalls;
#ifdef HAVE_BPF_SKEL
	struct augmented_raw_syscalls_bpf *skel;
#endif
#ifdef HAVE_LIBBPF_SUPPORT
	struct btf		*btf;
#endif
	struct record_opts	opts;
	struct evlist		*evlist;
	struct machine		*host;
	struct thread		*current;
	struct cgroup		*cgroup;
	u64			base_time;
	FILE			*output;
	unsigned long		nr_events;
	unsigned long		nr_events_printed;
	unsigned long		max_events;
	struct evswitch		evswitch;
	struct strlist		*ev_qualifier;
	struct {
		size_t		nr;
		int		*entries;
	}			ev_qualifier_ids;
	struct {
		size_t		nr;
		pid_t		*entries;
		struct bpf_map	*map;
	}			filter_pids;
	double			duration_filter;
	double			runtime_ms;
	struct {
		u64		vfs_getname,
				proc_getname;
	} stats;
	unsigned int		max_stack;
	unsigned int		min_stack;
	int			raw_augmented_syscalls_args_size;
	bool			raw_augmented_syscalls;
	bool			fd_path_disabled;
	bool			sort_events;
	bool			not_ev_qualifier;
	bool			live;
	bool			full_time;
	bool			sched;
	bool			multiple_threads;
	bool			summary;
	bool			summary_only;
	bool			errno_summary;
	bool			failure_only;
	bool			show_comm;
	bool			print_sample;
	bool			show_tool_stats;
	bool			trace_syscalls;
	bool			libtraceevent_print;
	bool			kernel_syscallchains;
	s16			args_alignment;
	bool			show_tstamp;
	bool			show_duration;
	bool			show_zeros;
	bool			show_arg_names;
	bool			show_string_prefix;
	bool			force;
	bool			vfs_getname;
	bool			force_btf;
	int			trace_pgfaults;
	char			*perfconfig_events;
	struct {
		struct ordered_events	data;
		u64			last;
	} oe;
};

static void trace__load_vmlinux_btf(struct trace *trace __maybe_unused)
{
#ifdef HAVE_LIBBPF_SUPPORT
	if (trace->btf != NULL)
		return;

	trace->btf = btf__load_vmlinux_btf();
	if (verbose > 0) {
		fprintf(trace->output, trace->btf ? "vmlinux BTF loaded\n" :
						    "Failed to load vmlinux BTF\n");
	}
#endif
}

struct tp_field {
	int offset;
	union {
		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
	};
};

#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value;  \
}

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);

#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);

static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
{
	field->offset = offset;

	switch (size) {
	case 1:
		field->integer = tp_field__u8;
		break;
	case 2:
		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
		break;
	case 4:
		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
		break;
	case 8:
		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
		break;
	default:
		return -1;
	}

	return 0;
}

static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
{
	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
}

static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
	return sample->raw_data + field->offset;
}

static int __tp_field__init_ptr(struct tp_field *field, int offset)
{
	field->offset = offset;
	field->pointer = tp_field__ptr;
	return 0;
}

static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
{
	return __tp_field__init_ptr(field, format_field->offset);
}

struct syscall_tp {
	struct tp_field id;
	union {
		struct tp_field args, ret;
	};
};

/*
 * The evsel->priv as used by 'perf trace'
 * sc:	for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME
 * fmt: for all the other tracepoints
 */
struct evsel_trace {
	struct syscall_tp	sc;
	struct syscall_arg_fmt	*fmt;
};

static struct evsel_trace *evsel_trace__new(void)
{
	return zalloc(sizeof(struct evsel_trace));
}

static void evsel_trace__delete(struct evsel_trace *et)
{
	if (et == NULL)
		return;

	zfree(&et->fmt);
	free(et);
}

/*
 * Used with raw_syscalls:sys_{enter,exit} and with the
 * syscalls:sys_{enter,exit}_SYSCALL tracepoints
 */
static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	return &et->sc;
}

static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel)
{
	if (evsel->priv == NULL) {
		evsel->priv = evsel_trace__new();
		if (evsel->priv == NULL)
			return NULL;
	}

	return __evsel__syscall_tp(evsel);
}

/*
 * Used with all the other tracepoints.
 */
static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	return et->fmt;
}

static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	if (evsel->priv == NULL) {
		et = evsel->priv = evsel_trace__new();

		if (et == NULL)
			return NULL;
	}

	if (et->fmt == NULL) {
		const struct tep_event *tp_format = evsel__tp_format(evsel);

		if (tp_format == NULL)
			goto out_delete;

		et->fmt = calloc(tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
		if (et->fmt == NULL)
			goto out_delete;
	}

	return __evsel__syscall_arg_fmt(evsel);

out_delete:
	evsel_trace__delete(evsel->priv);
	evsel->priv = NULL;
	return NULL;
}

static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
	struct tep_format_field *format_field = evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_uint(field, format_field, evsel->needs_swap);
}

#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
	   evsel__init_tp_uint_field(evsel, &sc->name, #name); })

static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
	struct tep_format_field *format_field = evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
	   evsel__init_tp_ptr_field(evsel, &sc->name, #name); })

static void evsel__delete_priv(struct evsel *evsel)
{
	zfree(&evsel->priv);
	evsel__delete(evsel);
}

static int evsel__init_syscall_tp(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel__syscall_tp(evsel);

	if (sc != NULL) {
		if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
		    evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
			return -ENOENT;

		return 0;
	}

	return -ENOMEM;
}

static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
	struct syscall_tp *sc = evsel__syscall_tp(evsel);

	if (sc != NULL) {
		struct tep_format_field *syscall_id = evsel__field(tp, "id");
		if (syscall_id == NULL)
			syscall_id = evsel__field(tp, "__syscall_nr");
		if (syscall_id == NULL ||
		    __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
			return -EINVAL;

		return 0;
	}

	return -ENOMEM;
}

static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
	struct syscall_tp *sc = __evsel__syscall_tp(evsel);

	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}

static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
	struct syscall_tp *sc = __evsel__syscall_tp(evsel);

	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}

static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
	if (evsel__syscall_tp(evsel) != NULL) {
		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
			return -ENOENT;

		evsel->handler = handler;

		return 0;
	}

	return -ENOMEM;
}

static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
	struct evsel *evsel = evsel__newtp("raw_syscalls", direction);

	/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (evsel__init_raw_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	evsel__delete_priv(evsel);
	return NULL;
}

#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.pointer(&fields->name, sample); })

size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_suffix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
}

size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_prefix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}

static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
						const char *intfmt,
						struct syscall_arg *arg)
{
	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
}

static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray

bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarray__strtoul(arg->parm, bf, size, ret);
}

bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarray__strtoul_flags(arg->parm, bf, size, ret);
}

bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarrays__strtoul(arg->parm, bf, size, ret);
}

size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
{
	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
}

size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	size_t printed;
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];
		int idx = val - sa->offset;

		if (idx >= 0 && idx < sa->nr_entries) {
			if (sa->entries[idx] == NULL)
				break;
			return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
		}
	}

	printed = scnprintf(bf, size, intfmt, val);
	if (show_prefix)
		printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
	return printed;
}

bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret)
{
	int i;

	for (i = 0; i < sa->nr_entries; ++i) {
		if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') {
			*ret = sa->offset + i;
			return true;
		}
	}

	return false;
}

bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret)
{
	u64 val = 0;
	char *tok = bf, *sep, *end;

	*ret = 0;

	while (size != 0) {
		int toklen = size;

		sep = memchr(tok, '|', size);
		if (sep != NULL) {
			size -= sep - tok + 1;

			end = sep - 1;
			while (end > tok && isspace(*end))
				--end;

			toklen = end - tok + 1;
		}

		while (isspace(*tok))
			++tok;

		if (isalpha(*tok) || *tok == '_') {
			if (!strarray__strtoul(sa, tok, toklen, &val))
				return false;
		} else
			val = strtoul(tok, NULL, 0);

		*ret |= (1 << (val - 1));

		if (sep == NULL)
			break;
		tok = sep + 1;
	}

	return true;
}

bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret)
{
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];

		if (strarray__strtoul(sa, bf, size, ret))
			return true;
	}

	return false;
}

size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
					struct syscall_arg *arg)
{
	return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
}

#ifndef AT_FDCWD
#define AT_FDCWD	-100
#endif

static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
					   struct syscall_arg *arg)
{
	int fd = arg->val;
	const char *prefix = "AT_FD";

	if (fd == AT_FDCWD)
		return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");

	return syscall_arg__scnprintf_fd(bf, size, arg);
}

#define SCA_FDAT syscall_arg__scnprintf_fd_at

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd

size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%#lx", arg->val);
}

size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
{
	if (arg->val == 0)
		return scnprintf(bf, size, "NULL");
	return syscall_arg__scnprintf_hex(bf, size, arg);
}

size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%d", arg->val);
}

size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%ld", arg->val);
}

static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg)
{
	// XXX Hey, maybe for sched:sched_switch prev/next comm fields we can
	// fill missing comms using thread__set_comm()...
	// here or in a special syscall_arg__scnprintf_pid_sched_tp...
	return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val);
}

#define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array

static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD", "OBJ_PIN", "OBJ_GET", "PROG_ATTACH",
	"PROG_DETACH", "PROG_TEST_RUN", "PROG_GET_NEXT_ID", "MAP_GET_NEXT_ID",
	"PROG_GET_FD_BY_ID", "MAP_GET_FD_BY_ID", "OBJ_GET_INFO_BY_FD",
	"PROG_QUERY", "RAW_TRACEPOINT_OPEN", "BTF_LOAD", "BTF_GET_FD_BY_ID",
	"TASK_FD_QUERY", "MAP_LOOKUP_AND_DELETE_ELEM", "MAP_FREEZE",
	"BTF_GET_NEXT_ID", "MAP_LOOKUP_BATCH", "MAP_LOOKUP_AND_DELETE_BATCH",
	"MAP_UPDATE_BATCH", "MAP_DELETE_BATCH", "LINK_CREATE", "LINK_UPDATE",
	"LINK_GET_FD_BY_ID", "LINK_GET_NEXT_ID", "ENABLE_STATS", "ITER_CREATE",
	"LINK_DETACH", "PROG_BIND_MAP",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");

static const char *fsmount_flags[] = {
	[1] = "CLOEXEC",
};
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");

#include "trace/beauty/generated/fsconfig_arrays.c"

static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");

static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers, "ITIMER_");

static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");

static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences, "SEEK_");

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
	"GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds, "F_");

static const char *fcntl_linux_specific_cmds[] = {
	"SETLEASE", "GETLEASE", "NOTIFY", "DUPFD_QUERY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};

static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);

static struct strarray *fcntl_cmds_arrays[] = {
	&strarray__fcntl_cmds,
	&strarray__fcntl_linux_specific_cmds,
};

static DEFINE_STRARRAYS(fcntl_cmds_arrays);

static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");

static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow, "SIG_");

static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid, "CLOCK_");

static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *suffix = "_OK";
	size_t printed = 0;
	int mode = arg->val;

	if (mode == F_OK) /* 0 */
		return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
#define	P_MODE(n) \
	if (mode & n##_OK) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
		mode &= ~n##_OK; \
	}

	P_MODE(R);
	P_MODE(W);
	P_MODE(X);
#undef P_MODE

	if (mode)
		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);

	return printed;
}

#define SCA_ACCMODE syscall_arg__scnprintf_access_mode

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_FILENAME syscall_arg__scnprintf_filename

// 'argname' is just documentation at this point, standing in for the comment that previously carried that info
#define SCA_FILENAME_FROM_USER(argname) \
	  { .scnprintf	= SCA_FILENAME, \
	    .from_user	= true, }

static size_t syscall_arg__scnprintf_buf(char *bf, size_t size, struct syscall_arg *arg);

#define SCA_BUF syscall_arg__scnprintf_buf

static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "O_";
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags

#ifndef GRND_NONBLOCK
#define GRND_NONBLOCK	0x0001
#endif
#ifndef GRND_RANDOM
#define GRND_RANDOM	0x0002
#endif

static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
						     struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "GRND_";
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & GRND_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~GRND_##n; \
	}

	P_FLAG(RANDOM);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags

#ifdef HAVE_LIBBPF_SUPPORT
static void syscall_arg_fmt__cache_btf_enum(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type)
{
	int id;

	type = strstr(type, "enum ");
	if (type == NULL)
		return;

	type += 5; // skip "enum " to get the enumeration name

	id = btf__find_by_name(btf, type);
	if (id < 0)
		return;

	arg_fmt->type = btf__type_by_id(btf, id);
}

static bool syscall_arg__strtoul_btf_enum(char *bf, size_t size, struct syscall_arg *arg, u64 *val)
{
	const struct btf_type *bt = arg->fmt->type;
	struct btf *btf = arg->trace->btf;
	struct btf_enum *be = btf_enum(bt);

	for (int i = 0; i < btf_vlen(bt); ++i, ++be) {
		const char *name = btf__name_by_offset(btf, be->name_off);
		int max_len = max(size, strlen(name));

		if (strncmp(name, bf, max_len) == 0) {
			*val = be->val;
			return true;
		}
	}

	return false;
}

static bool syscall_arg__strtoul_btf_type(char *bf, size_t size, struct syscall_arg *arg, u64 *val)
{
	const struct btf_type *bt;
	char *type = arg->type_name;
	struct btf *btf;

	trace__load_vmlinux_btf(arg->trace);

	btf = arg->trace->btf;
	if (btf == NULL)
		return false;

	if (arg->fmt->type == NULL) {
		// See if this is an enum
		syscall_arg_fmt__cache_btf_enum(arg->fmt, btf, type);
	}

	// Now let's see if we have a BTF type resolved
	bt = arg->fmt->type;
	if (bt == NULL)
		return false;

	// If it is an enum:
	if (btf_is_enum(arg->fmt->type))
		return syscall_arg__strtoul_btf_enum(bf, size, arg, val);

	return false;
}

static size_t btf_enum_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, int val)
{
	struct btf_enum *be = btf_enum(type);
	const int nr_entries = btf_vlen(type);

	for (int i = 0; i < nr_entries; ++i, ++be) {
		if (be->val == val) {
			return scnprintf(bf, size, "%s",
					 btf__name_by_offset(btf, be->name_off));
		}
	}

	return 0;
}

struct trace_btf_dump_snprintf_ctx {
	char   *bf;
	size_t printed, size;
};

static void trace__btf_dump_snprintf(void *vctx, const char *fmt, va_list args)
{
	struct trace_btf_dump_snprintf_ctx *ctx = vctx;

	ctx->printed += vscnprintf(ctx->bf + ctx->printed, ctx->size - ctx->printed, fmt, args);
}

static size_t btf_struct_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, struct syscall_arg *arg)
{
	struct trace_btf_dump_snprintf_ctx ctx = {
		.bf   = bf,
		.size = size,
	};
	struct augmented_arg *augmented_arg = arg->augmented.args;
	int type_id = arg->fmt->type_id, consumed;
	struct btf_dump *btf_dump;

	LIBBPF_OPTS(btf_dump_opts, dump_opts);
	LIBBPF_OPTS(btf_dump_type_data_opts, dump_data_opts);

	if (arg == NULL || arg->augmented.args == NULL)
		return 0;

	dump_data_opts.compact = true;
	dump_data_opts.skip_names = !arg->trace->show_arg_names;

	btf_dump = btf_dump__new(btf, trace__btf_dump_snprintf, &ctx, &dump_opts);
	if (btf_dump == NULL)
		return 0;

	/* pretty print the struct data here */
	if (btf_dump__dump_type_data(btf_dump, type_id, arg->augmented.args->value, type->size, &dump_data_opts) == 0)
		return 0;

	consumed = sizeof(*augmented_arg) + augmented_arg->size;
	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	btf_dump__free(btf_dump);

	return ctx.printed;
}

static size_t trace__btf_scnprintf(struct trace *trace, struct syscall_arg *arg, char *bf,
				   size_t size, int val, char *type)
{
	struct syscall_arg_fmt *arg_fmt = arg->fmt;

	if (trace->btf == NULL)
		return 0;

	if (arg_fmt->type == NULL) {
		// Check if this is an enum and if we have the BTF type for it.
		syscall_arg_fmt__cache_btf_enum(arg_fmt, trace->btf, type);
	}

	// Did we manage to find a BTF type for the syscall/tracepoint argument?
	if (arg_fmt->type == NULL)
		return 0;

	if (btf_is_enum(arg_fmt->type))
		return btf_enum_scnprintf(arg_fmt->type, trace->btf, bf, size, val);
	else if (btf_is_struct(arg_fmt->type) || btf_is_union(arg_fmt->type))
		return btf_struct_scnprintf(arg_fmt->type, trace->btf, bf, size, arg);

	return 0;
}

#else // HAVE_LIBBPF_SUPPORT
static size_t trace__btf_scnprintf(struct trace *trace __maybe_unused, struct syscall_arg *arg __maybe_unused,
				   char *bf __maybe_unused, size_t size __maybe_unused, int val __maybe_unused,
				   char *type __maybe_unused)
{
	return 0;
}

static bool syscall_arg__strtoul_btf_type(char *bf __maybe_unused, size_t size __maybe_unused,
					  struct syscall_arg *arg __maybe_unused, u64 *val __maybe_unused)
{
	return false;
}
#endif // HAVE_LIBBPF_SUPPORT

#define STUL_BTF_TYPE syscall_arg__strtoul_btf_type

#define STRARRAY(name, array) \
	  { .scnprintf	= SCA_STRARRAY, \
	    .strtoul	= STUL_STRARRAY, \
	    .parm	= &strarray__##array, }

#define STRARRAY_FLAGS(name, array) \
	  { .scnprintf	= SCA_STRARRAY_FLAGS, \
	    .strtoul	= STUL_STRARRAY_FLAGS, \
	    .parm	= &strarray__##array, }

#include "trace/beauty/eventfd.c"
#include "trace/beauty/futex_op.c"
#include "trace/beauty/futex_val3.c"
#include "trace/beauty/mmap.c"
#include "trace/beauty/mode_t.c"
#include "trace/beauty/msg_flags.c"
#include "trace/beauty/open_flags.c"
#include "trace/beauty/perf_event_open.c"
#include "trace/beauty/pid.c"
#include "trace/beauty/sched_policy.c"
#include "trace/beauty/seccomp.c"
#include "trace/beauty/signum.c"
#include "trace/beauty/socket_type.c"
#include "trace/beauty/waitid_options.c"

static const struct syscall_fmt syscall_fmts[] = {
	{ .name	    = "access",
	  .arg = { [1] = { .scnprintf = SCA_ACCMODE,  /* mode */ }, }, },
	{ .name	    = "arch_prctl",
	  .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
		   [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
	{ .name	    = "bind",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = SCA_SOCKADDR_FROM_USER(umyaddr),
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name	    = "bpf",
	  .arg = { [0] = STRARRAY(cmd, bpf_cmd),
		   [1] = { .from_user = true /* attr */, }, } },
	{ .name	    = "brk",	    .hexret = true,
	  .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
	{ .name	    = "clock_gettime",
	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
	{ .name	    = "clock_nanosleep",
	  .arg = { [2] = SCA_TIMESPEC_FROM_USER(req), }, },
	{ .name	    = "clone",	    .errpid = true, .nr_args = 5,
	  .arg = { [0] = { .name = "flags",	    .scnprintf = SCA_CLONE_FLAGS, },
		   [1] = { .name = "child_stack",   .scnprintf = SCA_HEX, },
		   [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
		   [3] = { .name = "child_tidptr",  .scnprintf = SCA_HEX, },
		   [4] = { .name = "tls",	    .scnprintf = SCA_HEX, }, }, },
	{ .name	    = "close",
	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
	{ .name	    = "connect",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = SCA_SOCKADDR_FROM_USER(servaddr),
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name	    = "epoll_ctl",
	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
	{ .name	    = "eventfd2",
	  .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
	{ .name	    = "faccessat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dirfd */ },
		   [1] = SCA_FILENAME_FROM_USER(pathname),
		   [2] = { .scnprintf = SCA_ACCMODE,	/* mode */ }, }, },
	{ .name	    = "faccessat2",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dirfd */ },
		   [1] = SCA_FILENAME_FROM_USER(pathname),
		   [2] = { .scnprintf = SCA_ACCMODE,	/* mode */ },
		   [3] = { .scnprintf = SCA_FACCESSAT2_FLAGS, /* flags */ }, }, },
	{ .name	    = "fchmodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "fchownat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "fcntl",
	  .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD,	/* cmd */
			   .strtoul   = STUL_STRARRAYS,
			   .parm      = &strarrays__fcntl_cmds_arrays,
			   .show_zero = true, },
		   [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
	{ .name	    = "flock",
	  .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
	{ .name	    = "fsconfig",
	  .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
	{ .name	    = "fsmount",
	  .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
		   [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
	{ .name	    = "fspick",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
		   [1] = SCA_FILENAME_FROM_USER(path),
		   [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
	{ .name	    = "fstat", .alias = "newfstat", },
	{ .name	    = "futex",
	  .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
		   [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
	{ .name	    = "futimesat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "getitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "getpid",	    .errpid = true, },
	{ .name	    = "getpgid",    .errpid = true, },
	{ .name	    = "getppid",    .errpid = true, },
	{ .name	    = "getrandom",
	  .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
	{ .name	    = "getrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "getsockopt",
	  .arg = { [1] = STRARRAY(level, socket_level), }, },
	{ .name	    = "gettid",	    .errpid = true, },
	{ .name	    = "ioctl",
	  .arg = {
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches.
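 * The SCA_IOCTL_CMD beautifier is only built on x86 for now, hence the
 * #if above; on other arches 'cmd' just gets the default integer printing.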
 */
		   [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#else
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#endif
	{ .name	    = "kcmp",	    .nr_args = 5,
	  .arg = { [0] = { .name = "pid1",	.scnprintf = SCA_PID, },
		   [1] = { .name = "pid2",	.scnprintf = SCA_PID, },
		   [2] = { .name = "type",	.scnprintf = SCA_KCMP_TYPE, },
		   [3] = { .name = "idx1",	.scnprintf = SCA_KCMP_IDX, },
		   [4] = { .name = "idx2",	.scnprintf = SCA_KCMP_IDX, }, }, },
	{ .name	    = "keyctl",
	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
	{ .name	    = "kill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "linkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "lseek",
	  .arg = { [2] = STRARRAY(whence, whences), }, },
	{ .name	    = "lstat", .alias = "newlstat", },
	{ .name	    = "madvise",
	  .arg = { [0] = { .scnprintf = SCA_HEX,      /* start */ },
		   [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
	{ .name	    = "mkdirat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "mknodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "mmap",	    .hexret = true,
/* The standard mmap maps to old_mmap on s390x */
#if defined(__s390x__)
	.alias = "old_mmap",
#endif
	  .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ },
		   [3] = { .scnprintf = SCA_MMAP_FLAGS,	/* flags */
			   .strtoul   = STUL_STRARRAY_FLAGS,
			   .parm      = &strarray__mmap_flags, },
		   [5] = { .scnprintf = SCA_HEX,	/* offset */ }, }, },
	{ .name	    = "mount",
	  .arg = { [0] = SCA_FILENAME_FROM_USER(devname),
		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
			   .mask_val  = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "move_mount",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* from_dfd */ },
		   [1] = SCA_FILENAME_FROM_USER(pathname),
		   [2] = { .scnprintf = SCA_FDAT,	/* to_dfd */ },
		   [3] = SCA_FILENAME_FROM_USER(pathname),
		   [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ }, }, },
	{ .name	    = "mq_unlink",
	  .arg = { [0] = SCA_FILENAME_FROM_USER(u_name), }, },
	{ .name	    = "mremap",	    .hexret = true,
	  .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
	{ .name	    = "name_to_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "nanosleep",
	  .arg = { [0] = SCA_TIMESPEC_FROM_USER(req), }, },
	{ .name	    = "newfstatat", .alias = "fstatat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ },
		   [1] = SCA_FILENAME_FROM_USER(pathname),
		   [3] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, },
	{ .name	    = "open",
	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "open_by_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "openat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "perf_event_open",
	  .arg = { [0] = SCA_PERF_ATTR_FROM_USER(attr),
		   [2] = { .scnprintf = SCA_INT,	/* cpu */ },
		   [3] = { .scnprintf = SCA_FD,		/* group_fd */ },
		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
	{ .name	    = "pipe2",
	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
	{ .name	    = "pkey_alloc",
	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS,	/* access_rights */ }, }, },
	{ .name	    = "pkey_free",
	  .arg = { [0] = { .scnprintf = SCA_INT,	/* key */ }, }, },
	{ .name	    = "pkey_mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ },
		   [3] = { .scnprintf = SCA_INT,	/* pkey */ }, }, },
	{ .name	    = "poll", .timeout = true, },
	{ .name	    = "ppoll", .timeout = true, },
	{ .name	    = "prctl",
	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */
			   .strtoul   = STUL_STRARRAY,
			   .parm      = &strarray__prctl_options, },
		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
	{ .name	    = "pread", .alias = "pread64", },
	{ .name	    = "preadv", .alias = "pread", },
	{ .name	    = "prlimit64",
	  .arg = { [1] = STRARRAY(resource, rlimit_resources),
		   [2] = { .from_user = true /* new_rlim */, }, }, },
	{ .name	    = "pwrite", .alias = "pwrite64", },
	{ .name	    = "readlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "recvfrom",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "recvmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "recvmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "renameat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
	{ .name	    = "renameat2",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
	{ .name	    = "rseq",	    .errpid = true,
	  .arg = { [0] = { .from_user = true /* rseq */, }, }, },
	{ .name	    = "rt_sigaction",
	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "rt_sigprocmask",
	  .arg = { [0] = STRARRAY(how, sighow), }, },
	{ .name	    = "rt_sigqueueinfo",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "rt_tgsigqueueinfo",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "sched_setscheduler",
	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
	{ .name	    = "seccomp",
	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP,	   /* op */ },
		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
	{ .name	    = "select", .timeout = true, },
	{ .name	    = "sendfile", .alias = "sendfile64", },
	{ .name	    = "sendmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "sendmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "sendto",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
		   [4] = SCA_SOCKADDR_FROM_USER(addr), }, },
	{ .name	    = "set_robust_list",    .errpid = true,
	  .arg = { [0] = { .from_user = true /* head */, }, }, },
	{ .name	    = "set_tid_address", .errpid = true, },
	{ .name	    = "setitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "setrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources),
		   [1] = { .from_user = true /* rlim */, }, }, },
	{ .name	    = "setsockopt",
	  .arg = { [1] = STRARRAY(level, socket_level), }, },
	{ .name	    = "socket",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name	    = "socketpair",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name	    = "stat", .alias = "newstat", },
	{ .name	    = "statx",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	 /* fdat */ },
		   [2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ },
		   [3] = { .scnprintf = SCA_STATX_MASK,	 /* mask */ }, }, },
	{ .name	    = "swapoff",
	  .arg = { [0] = SCA_FILENAME_FROM_USER(specialfile), }, },
	{ .name	    = "swapon",
	  .arg = { [0] = SCA_FILENAME_FROM_USER(specialfile), }, },
	{ .name	    = "symlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "sync_file_range",
	  .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
	{ .name	    = "tgkill",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "tkill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "umount2", .alias = "umount",
	  .arg = { [0] = SCA_FILENAME_FROM_USER(name), }, },
	{ .name	    = "uname", .alias = "newuname", },
	{ .name	    = "unlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [1] = SCA_FILENAME_FROM_USER(pathname),
		   [2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, },
	{ .name	    = "utimensat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
	{ .name	    = "wait4",	    .errpid = true,
	  .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
	{ .name	    = "waitid",	    .errpid = true,
	  .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
	{ .name	    = "write",
	  .arg = { [1] = { .scnprintf = SCA_BUF /* buf */, .from_user = true, }, }, },
};

static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static const struct syscall_fmt *__syscall_fmt__find(const struct syscall_fmt *fmts,
						     const int nmemb,
						     const char *name)
{
	return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

static const struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return __syscall_fmt__find(syscall_fmts, nmemb, name);
}

static const struct syscall_fmt *__syscall_fmt__find_by_alias(const struct syscall_fmt *fmts,
							      const int nmemb, const char *alias)
{
	int i;

	for (i = 0; i < nmemb; ++i) {
		if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0)
			return &fmts[i];
	}

	return NULL;
}

static const struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias);
}

/*
 * is_exit: is this "exit" or "exit_group"?
 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
 * nonexistent: Just a hole in the syscall table, syscall id not allocated
 */
struct syscall {
	struct tep_event    *tp_format;
	int		    nr_args;
	int		    args_size;
	struct {
		struct bpf_program *sys_enter,
				   *sys_exit;
	}		    bpf_prog;
	bool		    is_exit;
	bool		    is_open;
	bool		    nonexistent;
	bool		    use_btf;
	struct tep_format_field *args;
	const char	    *name;
	const struct syscall_fmt  *fmt;
	struct syscall_arg_fmt *arg_fmt;
};

/*
 * We need to have this 'calculated' boolean because in some cases we really
 * don't know what is the duration of a syscall, for instance, when we start
 * a session and some threads are waiting for a syscall to finish, say 'poll',
 * in which case all we can do is to print "( ? )" for the duration and for the
 * start timestamp.
 */
static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (!calculated)
		printed += fprintf(fp, "         ");
	else if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}

/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
 * ret_scnprintf: syscall args may set this to a different syscall return
 *                formatter, for instance, fcntl may return fds, file flags, etc.
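 * files.table: grows on demand, indexed by fd, caching resolved pathnames so
 *              that fd arguments can be printed as, e.g., "3</etc/passwd>".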
 */
struct thread_trace {
	u64		  entry_time;
	bool		  entry_pending;
	unsigned long	  nr_events;
	unsigned long	  pfmaj, pfmin;
	char		  *entry_str;
	double		  runtime_ms;
	size_t		  (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	struct {
		unsigned long ptr;
		short int     entry_str_pos;
		bool	      pending_open;
		unsigned int  namelen;
		char	      *name;
	} filename;
	struct {
		int	      max;
		struct file   *table;
	} files;

	struct intlist *syscall_stats;
};

static struct thread_trace *thread_trace__new(void)
{
	struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));

	if (ttrace) {
		ttrace->files.max = -1;
		ttrace->syscall_stats = intlist__new(NULL);
	}

	return ttrace;
}

static void thread_trace__free_files(struct thread_trace *ttrace);

static void thread_trace__delete(void *pttrace)
{
	struct thread_trace *ttrace = pttrace;

	if (!ttrace)
		return;

	intlist__delete(ttrace->syscall_stats);
	ttrace->syscall_stats = NULL;
	thread_trace__free_files(ttrace);
	zfree(&ttrace->entry_str);
	free(ttrace);
}

static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	if (thread__priv(thread) == NULL)
		thread__set_priv(thread, thread_trace__new());

	if (thread__priv(thread) == NULL)
		goto fail;

	ttrace = thread__priv(thread);
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(fp, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}


void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
{
	struct thread_trace *ttrace = thread__priv(arg->thread);

	ttrace->ret_scnprintf = ret_scnprintf;
}

#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

static const size_t trace__entry_str_size = 2048;

static void thread_trace__free_files(struct thread_trace *ttrace)
{
	for (int i = 0; i < ttrace->files.max; ++i) {
		struct file *file = ttrace->files.table + i;
		zfree(&file->pathname);
	}

	zfree(&ttrace->files.table);
	ttrace->files.max = -1;
}

static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
	if (fd < 0)
		return NULL;

	if (fd > ttrace->files.max) {
		struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));

		if (nfiles == NULL)
			return NULL;

		if (ttrace->files.max != -1) {
			memset(nfiles + ttrace->files.max + 1, 0,
			       (fd - ttrace->files.max) * sizeof(struct file));
		} else {
			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
		}

		ttrace->files.table = nfiles;
		ttrace->files.max   = fd;
	}

	return ttrace->files.table + fd;
}

struct file *thread__files_entry(struct thread *thread, int fd)
{
	return thread_trace__files_entry(thread__priv(thread), fd);
}

static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
{
	struct thread_trace *ttrace = thread__priv(thread);
	struct file *file = thread_trace__files_entry(ttrace, fd);

	if (file != NULL) {
		struct stat st;
		if (stat(pathname, &st) == 0)
			file->dev_maj = major(st.st_rdev);
		file->pathname = strdup(pathname);
		if (file->pathname)
			return 0;
	}

	return -1;
}

static int thread__read_fd_path(struct thread *thread, int fd)
{
	char linkname[PATH_MAX], pathname[PATH_MAX];
	struct stat st;
	int ret;

	if (thread__pid(thread) == thread__tid(thread)) {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/fd/%d", thread__pid(thread), fd);
	} else {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/task/%d/fd/%d",
			  thread__pid(thread), thread__tid(thread), fd);
	}

	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
		return -1;

	ret = readlink(linkname, pathname, sizeof(pathname));

	if (ret < 0 || ret > st.st_size)
		return -1;

	pathname[ret] = '\0';
	return trace__set_fd_pathname(thread, fd, pathname);
}

static const char *thread__fd_path(struct thread *thread, int fd,
				   struct trace *trace)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (ttrace == NULL || trace->fd_path_disabled)
		return NULL;

	if (fd < 0)
		return NULL;

	if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
		if (!trace->live)
			return NULL;
		++trace->stats.proc_getname;
		if (thread__read_fd_path(thread, fd))
			return NULL;
	}

	return ttrace->files.table[fd].pathname;
}

size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = scnprintf(bf, size, "%d", fd);
	const char *path = thread__fd_path(arg->thread, fd, arg->trace);

	if (path)
		printed += scnprintf(bf + printed, size - printed, "<%s>", path);

	return printed;
}

size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
{
	size_t printed = scnprintf(bf, size, "%d", fd);
	struct thread *thread = machine__find_thread(trace->host, pid, pid);

	if (thread) {
		const char *path = thread__fd_path(thread, fd, trace);

		if (path)
			printed += scnprintf(bf + printed, size - printed, "<%s>", path);

		thread__put(thread);
	}

	return printed;
}

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
	struct thread_trace *ttrace = thread__priv(arg->thread);

	if (ttrace && fd >= 0 && fd <= ttrace->files.max)
		zfree(&ttrace->files.table[fd].pathname);

	return printed;
}

static void thread__set_filename_pos(struct thread *thread, const char *bf,
				     unsigned long ptr)
{
	struct thread_trace *ttrace = thread__priv(thread);

	ttrace->filename.ptr = ptr;
	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
}

static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
	struct augmented_arg *augmented_arg = arg->augmented.args;
	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
	/*
	 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
	 * we would have two strings, each prefixed by its size.
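	 * Each payload is a struct augmented_arg header followed by 'size'
	 * bytes of value, hence the sizeof() + size skip below.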
1755 */ 1756 int consumed = sizeof(*augmented_arg) + augmented_arg->size; 1757 1758 arg->augmented.args = ((void *)arg->augmented.args) + consumed; 1759 arg->augmented.size -= consumed; 1760 1761 return printed; 1762 } 1763 1764 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size, 1765 struct syscall_arg *arg) 1766 { 1767 unsigned long ptr = arg->val; 1768 1769 if (arg->augmented.args) 1770 return syscall_arg__scnprintf_augmented_string(arg, bf, size); 1771 1772 if (!arg->trace->vfs_getname) 1773 return scnprintf(bf, size, "%#x", ptr); 1774 1775 thread__set_filename_pos(arg->thread, bf, ptr); 1776 return 0; 1777 } 1778 1779 #define MAX_CONTROL_CHAR 31 1780 #define MAX_ASCII 127 1781 1782 static size_t syscall_arg__scnprintf_buf(char *bf, size_t size, struct syscall_arg *arg) 1783 { 1784 struct augmented_arg *augmented_arg = arg->augmented.args; 1785 unsigned char *orig = (unsigned char *)augmented_arg->value; 1786 size_t printed = 0; 1787 int consumed; 1788 1789 if (augmented_arg == NULL) 1790 return 0; 1791 1792 for (int j = 0; j < augmented_arg->size; ++j) { 1793 bool control_char = orig[j] <= MAX_CONTROL_CHAR || orig[j] >= MAX_ASCII; 1794 /* print control characters (0~31 and 127), and non-ascii characters in \(digits) */ 1795 printed += scnprintf(bf + printed, size - printed, control_char ? "\\%d" : "%c", (int)orig[j]); 1796 } 1797 1798 consumed = sizeof(*augmented_arg) + augmented_arg->size; 1799 arg->augmented.args = ((void *)arg->augmented.args) + consumed; 1800 arg->augmented.size -= consumed; 1801 1802 return printed; 1803 } 1804 1805 static bool trace__filter_duration(struct trace *trace, double t) 1806 { 1807 return t < (trace->duration_filter * NSEC_PER_MSEC); 1808 } 1809 1810 static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) 1811 { 1812 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC; 1813 1814 return fprintf(fp, "%10.3f ", ts); 1815 } 1816 1817 /* 1818 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are 1819 * using ttrace->entry_time for a thread that receives a sys_exit without 1820 * first having received a sys_enter ("poll" issued before tracing session 1821 * starts, lost sys_enter exit due to ring buffer overflow). 1822 */ 1823 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) 1824 { 1825 if (tstamp > 0) 1826 return __trace__fprintf_tstamp(trace, tstamp, fp); 1827 1828 return fprintf(fp, " ? 
"); 1829 } 1830 1831 static pid_t workload_pid = -1; 1832 static volatile sig_atomic_t done = false; 1833 static volatile sig_atomic_t interrupted = false; 1834 1835 static void sighandler_interrupt(int sig __maybe_unused) 1836 { 1837 done = interrupted = true; 1838 } 1839 1840 static void sighandler_chld(int sig __maybe_unused, siginfo_t *info, 1841 void *context __maybe_unused) 1842 { 1843 if (info->si_pid == workload_pid) 1844 done = true; 1845 } 1846 1847 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp) 1848 { 1849 size_t printed = 0; 1850 1851 if (trace->multiple_threads) { 1852 if (trace->show_comm) 1853 printed += fprintf(fp, "%.14s/", thread__comm_str(thread)); 1854 printed += fprintf(fp, "%d ", thread__tid(thread)); 1855 } 1856 1857 return printed; 1858 } 1859 1860 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread, 1861 u64 duration, bool duration_calculated, u64 tstamp, FILE *fp) 1862 { 1863 size_t printed = 0; 1864 1865 if (trace->show_tstamp) 1866 printed = trace__fprintf_tstamp(trace, tstamp, fp); 1867 if (trace->show_duration) 1868 printed += fprintf_duration(duration, duration_calculated, fp); 1869 return printed + trace__fprintf_comm_tid(trace, thread, fp); 1870 } 1871 1872 static int trace__process_event(struct trace *trace, struct machine *machine, 1873 union perf_event *event, struct perf_sample *sample) 1874 { 1875 int ret = 0; 1876 1877 switch (event->header.type) { 1878 case PERF_RECORD_LOST: 1879 color_fprintf(trace->output, PERF_COLOR_RED, 1880 "LOST %" PRIu64 " events!\n", (u64)event->lost.lost); 1881 ret = machine__process_lost_event(machine, event, sample); 1882 break; 1883 default: 1884 ret = machine__process_event(machine, event, sample); 1885 break; 1886 } 1887 1888 return ret; 1889 } 1890 1891 static int trace__tool_process(const struct perf_tool *tool, 1892 union perf_event *event, 1893 struct perf_sample *sample, 1894 struct machine *machine) 1895 { 1896 struct trace *trace = container_of(tool, struct trace, tool); 1897 return trace__process_event(trace, machine, event, sample); 1898 } 1899 1900 static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp) 1901 { 1902 struct machine *machine = vmachine; 1903 1904 if (machine->kptr_restrict_warned) 1905 return NULL; 1906 1907 if (symbol_conf.kptr_restrict) { 1908 pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n" 1909 "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n" 1910 "Kernel samples will not be resolved.\n"); 1911 machine->kptr_restrict_warned = true; 1912 return NULL; 1913 } 1914 1915 return machine__resolve_kernel_addr(vmachine, addrp, modp); 1916 } 1917 1918 static int trace__symbols_init(struct trace *trace, struct evlist *evlist) 1919 { 1920 int err = symbol__init(NULL); 1921 1922 if (err) 1923 return err; 1924 1925 trace->host = machine__new_host(); 1926 if (trace->host == NULL) 1927 return -ENOMEM; 1928 1929 thread__set_priv_destructor(thread_trace__delete); 1930 1931 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr); 1932 if (err < 0) 1933 goto out; 1934 1935 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, 1936 evlist->core.threads, trace__tool_process, 1937 true, false, 1); 1938 out: 1939 if (err) 1940 symbol__exit(); 1941 1942 return err; 1943 } 1944 1945 static void trace__symbols__exit(struct trace *trace) 1946 { 1947 
machine__exit(trace->host); 1948 trace->host = NULL; 1949 1950 symbol__exit(); 1951 } 1952 1953 static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args) 1954 { 1955 int idx; 1956 1957 if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0) 1958 nr_args = sc->fmt->nr_args; 1959 1960 sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt)); 1961 if (sc->arg_fmt == NULL) 1962 return -1; 1963 1964 for (idx = 0; idx < nr_args; ++idx) { 1965 if (sc->fmt) 1966 sc->arg_fmt[idx] = sc->fmt->arg[idx]; 1967 } 1968 1969 sc->nr_args = nr_args; 1970 return 0; 1971 } 1972 1973 static const struct syscall_arg_fmt syscall_arg_fmts__by_name[] = { 1974 { .name = "msr", .scnprintf = SCA_X86_MSR, .strtoul = STUL_X86_MSR, }, 1975 { .name = "vector", .scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, }, 1976 }; 1977 1978 static int syscall_arg_fmt__cmp(const void *name, const void *fmtp) 1979 { 1980 const struct syscall_arg_fmt *fmt = fmtp; 1981 return strcmp(name, fmt->name); 1982 } 1983 1984 static const struct syscall_arg_fmt * 1985 __syscall_arg_fmt__find_by_name(const struct syscall_arg_fmt *fmts, const int nmemb, 1986 const char *name) 1987 { 1988 return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp); 1989 } 1990 1991 static const struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name) 1992 { 1993 const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name); 1994 return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name); 1995 } 1996 1997 static struct tep_format_field * 1998 syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field, 1999 bool *use_btf) 2000 { 2001 struct tep_format_field *last_field = NULL; 2002 int len; 2003 2004 for (; field; field = field->next, ++arg) { 2005 last_field = field; 2006 2007 if (arg->scnprintf) 2008 continue; 2009 2010 len = strlen(field->name); 2011 2012 // As far as heuristics (or intention) goes this seems to hold true, and makes sense! 
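		// E.g. (illustrative): openat()'s "const char * filename" flows from
		// user space into the kernel, so it is safe to collect at sys_enter,
		// while nanosleep()'s non-const "struct __kernel_timespec * rmtp" is
		// written by the kernel, so collecting it at entry would only capture
		// stale user memory.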
		if ((field->flags & TEP_FIELD_IS_POINTER) && strstarts(field->type, "const "))
			arg->from_user = true;

		if (strcmp(field->type, "const char *") == 0 &&
		    ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
		     strstr(field->name, "path") != NULL)) {
			arg->scnprintf = SCA_FILENAME;
		} else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
			arg->scnprintf = SCA_PTR;
		else if (strcmp(field->type, "pid_t") == 0)
			arg->scnprintf = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			arg->scnprintf = SCA_MODE_T;
		else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) {
			arg->scnprintf = SCA_CHAR_ARRAY;
			arg->nr_entries = field->arraylen;
		} else if ((strcmp(field->type, "int") == 0 ||
			    strcmp(field->type, "unsigned int") == 0 ||
			    strcmp(field->type, "long") == 0) &&
			   len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
			/*
			 * /sys/kernel/tracing/events/syscalls/sys_enter*
			 * grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
			 * 65 int
			 * 23 unsigned int
			 * 7 unsigned long
			 */
			arg->scnprintf = SCA_FD;
		} else if (strstr(field->type, "enum") && use_btf != NULL) {
			*use_btf = true;
			arg->strtoul = STUL_BTF_TYPE;
		} else {
			const struct syscall_arg_fmt *fmt =
				syscall_arg_fmt__find_by_name(field->name);

			if (fmt) {
				arg->scnprintf = fmt->scnprintf;
				arg->strtoul = fmt->strtoul;
			}
		}
	}

	return last_field;
}

static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args,
									  &sc->use_btf);

	if (last_field)
		sc->args_size = last_field->offset + last_field->size;

	return 0;
}

static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = syscalltbl__name(trace->sctbl, id);
	int err;

	if (trace->syscalls.table == NULL) {
		trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
		if (trace->syscalls.table == NULL)
			return -ENOMEM;
	}
	sc = trace->syscalls.table + id;
	if (sc->nonexistent)
		return -EEXIST;

	if (name == NULL) {
		sc->nonexistent = true;
		return -EEXIST;
	}

	sc->name = name;
	sc->fmt = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
	}

	/*
	 * If the tracepoint format can't be read from tracefs, the tracepoint
	 * doesn't exist on this kernel, so flag it as nonexistent.
	 */
	if (IS_ERR(sc->tp_format)) {
		sc->nonexistent = true;
		return PTR_ERR(sc->tp_format);
	}

	/*
	 * The tracepoint format contains a __syscall_nr field, so it has one
	 * more field than the actual number of syscall arguments.
	 */
	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
					RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields - 1))
		return -ENOMEM;

	sc->args = sc->tp_format->format.fields;
	/*
	 * The first field, '__syscall_nr' (or just 'nr' on older kernels), is
	 * the syscall number, not a real syscall argument, so skip it when
	 * present.
	 */
	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
		sc->args = sc->args->next;
		--sc->nr_args;
	}

	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
	sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");

	err = syscall__set_arg_fmts(sc);

	/* after calling syscall__set_arg_fmts() we'll know whether use_btf is true */
	if (sc->use_btf)
		trace__load_vmlinux_btf(trace);

	return err;
}

static int evsel__init_tp_arg_scnprintf(struct evsel *evsel, bool *use_btf)
{
	struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);

	if (fmt != NULL) {
		const struct tep_event *tp_format = evsel__tp_format(evsel);

		if (tp_format) {
			syscall_arg_fmt__init_array(fmt, tp_format->format.fields, use_btf);
			return 0;
		}
	}

	return -ENOMEM;
}

static int intcmp(const void *a, const void *b)
{
	const int *one = a, *another = b;

	return *one - *another;
}

static int trace__validate_ev_qualifier(struct trace *trace)
{
	int err = 0;
	bool printed_invalid_prefix = false;
	struct str_node *pos;
	size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);

	trace->ev_qualifier_ids.entries = malloc(nr_allocated *
						 sizeof(trace->ev_qualifier_ids.entries[0]));

	if (trace->ev_qualifier_ids.entries == NULL) {
		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
		      trace->output);
		err = -EINVAL;
		goto out;
	}

	strlist__for_each_entry(pos, trace->ev_qualifier) {
		const char *sc = pos->s;
		int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;

		if (id < 0) {
			id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
			if (id >= 0)
				goto matches;

			if (!printed_invalid_prefix) {
				pr_debug("Skipping unknown syscalls: ");
				printed_invalid_prefix = true;
			} else {
				pr_debug(", ");
			}

			pr_debug("%s", sc);
			continue;
		}
matches:
		trace->ev_qualifier_ids.entries[nr_used++] = id;
		if (match_next == -1)
			continue;

		while (1) {
			id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
			if (id < 0)
				break;
			if (nr_allocated == nr_used) {
				void *entries;

				nr_allocated += 8;
				entries = realloc(trace->ev_qualifier_ids.entries,
						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
				if (entries == NULL) {
					err = -ENOMEM;
					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
					goto out_free;
				}
				trace->ev_qualifier_ids.entries = entries;
			}
			trace->ev_qualifier_ids.entries[nr_used++] = id;
		}
	}

	trace->ev_qualifier_ids.nr = nr_used;
	qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
out:
	if (printed_invalid_prefix)
		pr_debug("\n");
	return err;
out_free:
	zfree(&trace->ev_qualifier_ids.entries);
	trace->ev_qualifier_ids.nr = 0;
	goto out;
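	/*
	 * E.g. a qualifier like "-e close,open*" resolves "close" directly via
	 * syscalltbl__id() and expands the glob via
	 * syscalltbl__strglobmatch_first()/_next(), so the entries array may
	 * end up as { close, open, openat, openat2, open_by_handle_at, ... }
	 * (illustrative), sorted above so that trace__syscall_enabled() can
	 * bsearch() it.
	 */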
}

static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
{
	bool in_ev_qualifier;

	if (trace->ev_qualifier_ids.nr == 0)
		return true;

	in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
				  trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;

	if (in_ev_qualifier)
		return !trace->not_ev_qualifier;

	return trace->not_ev_qualifier;
}

/*
 * args is to be interpreted as a series of longs but we need to handle
 * 8-byte unaligned accesses. args points to raw_data within the event
 * and raw_data is guaranteed NOT to be 8-byte aligned because it is
 * preceded by raw_size, which is a u32. So we need to copy args to a temp
 * variable to read it. Most notably this avoids extended load instructions
 * on unaligned addresses.
 */
unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
{
	unsigned long val;
	unsigned char *p = arg->args + sizeof(unsigned long) * idx;

	memcpy(&val, p, sizeof(val));
	return val;
}

static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
				      struct syscall_arg *arg)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
		return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);

	return scnprintf(bf, size, "arg%d: ", arg->idx);
}

/*
 * Check if the value is in fact zero, i.e. mask whatever needs masking, such
 * as the mount 'flags' argument that needs some magic flags ignored, see the
 * comment in tools/perf/trace/beauty/mount_flags.c
 */
static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val)
{
	if (fmt && fmt->mask_val)
		return fmt->mask_val(arg, val);

	return val;
}

static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size,
					     struct syscall_arg *arg, unsigned long val)
{
	if (fmt && fmt->scnprintf) {
		arg->val = val;
		if (fmt->parm)
			arg->parm = fmt->parm;
		return fmt->scnprintf(bf, size, arg);
	}
	return scnprintf(bf, size, "%ld", val);
}

static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned char *args, void *augmented_args, int augmented_args_size,
				      struct trace *trace, struct thread *thread)
{
	size_t printed = 0, btf_printed;
	unsigned long val;
	u8 bit = 1;
	struct syscall_arg arg = {
		.args	= args,
		.augmented = {
			.size = augmented_args_size,
			.args = augmented_args,
		},
		.idx	= 0,
		.mask	= 0,
		.trace	= trace,
		.thread	= thread,
		.show_string_prefix = trace->show_string_prefix,
	};
	struct thread_trace *ttrace = thread__priv(thread);
	void *default_scnprintf;

	/*
	 * Things like fcntl will set this in its 'cmd' formatter to pick the
	 * right formatter for the return value (an fd? file flags?), which is
	 * not needed for syscalls that always return a given type, say an fd.
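	 *
	 * E.g. fcntl(fd, F_DUPFD, 0) returns a new fd while fcntl(fd, F_GETFL)
	 * returns file flags, and only the 'cmd' beautifier knows which, so it
	 * plants the matching return formatter in ttrace->ret_scnprintf as a
	 * side effect of formatting 'cmd'.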
2331 */ 2332 ttrace->ret_scnprintf = NULL; 2333 2334 if (sc->args != NULL) { 2335 struct tep_format_field *field; 2336 2337 for (field = sc->args; field; 2338 field = field->next, ++arg.idx, bit <<= 1) { 2339 if (arg.mask & bit) 2340 continue; 2341 2342 arg.fmt = &sc->arg_fmt[arg.idx]; 2343 val = syscall_arg__val(&arg, arg.idx); 2344 /* 2345 * Some syscall args need some mask, most don't and 2346 * return val untouched. 2347 */ 2348 val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val); 2349 2350 /* 2351 * Suppress this argument if its value is zero and show_zero 2352 * property isn't set. 2353 * 2354 * If it has a BTF type, then override the zero suppression knob 2355 * as the common case is for zero in an enum to have an associated entry. 2356 */ 2357 if (val == 0 && !trace->show_zeros && 2358 !(sc->arg_fmt && sc->arg_fmt[arg.idx].show_zero) && 2359 !(sc->arg_fmt && sc->arg_fmt[arg.idx].strtoul == STUL_BTF_TYPE)) 2360 continue; 2361 2362 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); 2363 2364 if (trace->show_arg_names) 2365 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); 2366 2367 default_scnprintf = sc->arg_fmt[arg.idx].scnprintf; 2368 2369 if (trace->force_btf || default_scnprintf == NULL || default_scnprintf == SCA_PTR) { 2370 btf_printed = trace__btf_scnprintf(trace, &arg, bf + printed, 2371 size - printed, val, field->type); 2372 if (btf_printed) { 2373 printed += btf_printed; 2374 continue; 2375 } 2376 } 2377 2378 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], 2379 bf + printed, size - printed, &arg, val); 2380 } 2381 } else if (IS_ERR(sc->tp_format)) { 2382 /* 2383 * If we managed to read the tracepoint /format file, then we 2384 * may end up not having any args, like with gettid(), so only 2385 * print the raw args when we didn't manage to read it. 2386 */ 2387 while (arg.idx < sc->nr_args) { 2388 if (arg.mask & bit) 2389 goto next_arg; 2390 val = syscall_arg__val(&arg, arg.idx); 2391 if (printed) 2392 printed += scnprintf(bf + printed, size - printed, ", "); 2393 printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg); 2394 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val); 2395 next_arg: 2396 ++arg.idx; 2397 bit <<= 1; 2398 } 2399 } 2400 2401 return printed; 2402 } 2403 2404 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel, 2405 union perf_event *event, 2406 struct perf_sample *sample); 2407 2408 static struct syscall *trace__syscall_info(struct trace *trace, 2409 struct evsel *evsel, int id) 2410 { 2411 int err = 0; 2412 2413 if (id < 0) { 2414 2415 /* 2416 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried 2417 * before that, leaving at a higher verbosity level till that is 2418 * explained. Reproduced with plain ftrace with: 2419 * 2420 * echo 1 > /t/events/raw_syscalls/sys_exit/enable 2421 * grep "NR -1 " /t/trace_pipe 2422 * 2423 * After generating some load on the machine. 
2424 */ 2425 if (verbose > 1) { 2426 static u64 n; 2427 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n", 2428 id, evsel__name(evsel), ++n); 2429 } 2430 return NULL; 2431 } 2432 2433 err = -EINVAL; 2434 2435 if (id > trace->sctbl->syscalls.max_id) { 2436 goto out_cant_read; 2437 } 2438 2439 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) && 2440 (err = trace__read_syscall_info(trace, id)) != 0) 2441 goto out_cant_read; 2442 2443 if (trace->syscalls.table && trace->syscalls.table[id].nonexistent) 2444 goto out_cant_read; 2445 2446 return &trace->syscalls.table[id]; 2447 2448 out_cant_read: 2449 if (verbose > 0) { 2450 char sbuf[STRERR_BUFSIZE]; 2451 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf))); 2452 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL) 2453 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name); 2454 fputs(" information\n", trace->output); 2455 } 2456 return NULL; 2457 } 2458 2459 struct syscall_stats { 2460 struct stats stats; 2461 u64 nr_failures; 2462 int max_errno; 2463 u32 *errnos; 2464 }; 2465 2466 static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace, 2467 int id, struct perf_sample *sample, long err, bool errno_summary) 2468 { 2469 struct int_node *inode; 2470 struct syscall_stats *stats; 2471 u64 duration = 0; 2472 2473 inode = intlist__findnew(ttrace->syscall_stats, id); 2474 if (inode == NULL) 2475 return; 2476 2477 stats = inode->priv; 2478 if (stats == NULL) { 2479 stats = zalloc(sizeof(*stats)); 2480 if (stats == NULL) 2481 return; 2482 2483 init_stats(&stats->stats); 2484 inode->priv = stats; 2485 } 2486 2487 if (ttrace->entry_time && sample->time > ttrace->entry_time) 2488 duration = sample->time - ttrace->entry_time; 2489 2490 update_stats(&stats->stats, duration); 2491 2492 if (err < 0) { 2493 ++stats->nr_failures; 2494 2495 if (!errno_summary) 2496 return; 2497 2498 err = -err; 2499 if (err > stats->max_errno) { 2500 u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32)); 2501 2502 if (new_errnos) { 2503 memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32)); 2504 } else { 2505 pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n", 2506 thread__comm_str(thread), thread__pid(thread), 2507 thread__tid(thread)); 2508 return; 2509 } 2510 2511 stats->errnos = new_errnos; 2512 stats->max_errno = err; 2513 } 2514 2515 ++stats->errnos[err - 1]; 2516 } 2517 } 2518 2519 static int trace__printf_interrupted_entry(struct trace *trace) 2520 { 2521 struct thread_trace *ttrace; 2522 size_t printed; 2523 int len; 2524 2525 if (trace->failure_only || trace->current == NULL) 2526 return 0; 2527 2528 ttrace = thread__priv(trace->current); 2529 2530 if (!ttrace->entry_pending) 2531 return 0; 2532 2533 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output); 2534 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str); 2535 2536 if (len < trace->args_alignment - 4) 2537 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " "); 2538 2539 printed += fprintf(trace->output, " ...\n"); 2540 2541 ttrace->entry_pending = false; 2542 ++trace->nr_events_printed; 2543 2544 return printed; 2545 } 2546 2547 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel, 2548 struct perf_sample *sample, struct thread *thread) 2549 
{
	int printed = 0;

	if (trace->print_sample) {
		double ts = (double)sample->time / NSEC_PER_MSEC;

		printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
				   evsel__name(evsel), ts,
				   thread__comm_str(thread),
				   sample->pid, sample->tid, sample->cpu);
	}

	return printed;
}

static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
{
	/*
	 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
	 * and there we get all 6 syscall args plus the tracepoint common
	 * fields that get calculated at the start and the syscall_nr (another
	 * long). So we check if that is the case and, if so, don't look at
	 * just sc->args_size but always at the full raw_syscalls:sys_enter
	 * payload, which is fixed.
	 *
	 * We'll revisit this later to pass sc->args_size to the BPF augmenter
	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
	 * copies only what we need for each syscall, like what happens when
	 * we use syscalls:sys_enter_NAME, reducing the kernel/userspace
	 * traffic to just what is needed for each syscall.
	 */
	int args_size = raw_augmented_args_size ?: sc->args_size;

	*augmented_args_size = sample->raw_size - args_size;
	if (*augmented_args_size > 0) {
		static uintptr_t argbuf[1024]; /* assuming single-threaded */

		if ((size_t)(*augmented_args_size) > sizeof(argbuf))
			return NULL;

		/*
		 * The perf ring buffer is 8-byte aligned but sample->raw_data
		 * is not, because it's preceded by its u32 size. Later the
		 * beautifiers will use the augmented args with stricter
		 * alignments, like in some struct. To make sure it's aligned,
		 * copy the args into a static buffer, as this is
		 * single-threaded for now.
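		 *
		 * I.e. raw_data sits right after a u32, so it is only 4-byte
		 * aligned, while argbuf[], an array of uintptr_t, is naturally
		 * u64-aligned on 64-bit (illustrative of the common case).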
		 */
		memcpy(argbuf, sample->raw_data + args_size, *augmented_args_size);

		return argbuf;
	}
	return NULL;
}

static void syscall__exit(struct syscall *sc)
{
	if (!sc)
		return;

	zfree(&sc->arg_fmt);
}

static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	int printed = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	int augmented_args_size = 0;
	void *augmented_args = NULL;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)
			goto out_put;
	}

	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
		trace__printf_interrupted_entry(trace);
	/*
	 * If this is raw_syscalls:sys_enter, then it always comes with the 6
	 * possible arguments, even if the syscall being handled, say "openat",
	 * uses only 4. That breaks the syscall__augmented_args() check for
	 * augmented args, since we calculate syscall->args_size from each
	 * syscalls:sys_enter_NAME tracefs format file: when handling, say, the
	 * openat syscall via raw_syscalls:sys_enter we get 6 args where we
	 * expected just 4 and would mistakenly take the extra 2 u64 args for
	 * the augmented filename. So just check here and avoid using augmented
	 * syscalls when the evsel is the raw_syscalls one.
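	 *
	 * Worked example (64-bit longs assumed): openat() uses 4 of the 6
	 * argument slots, so its syscalls:sys_enter_openat format describes 4
	 * args, but a raw_syscalls:sys_enter record always carries all 6, and
	 * the 2 extra u64s at the tail would be misread as an augmented
	 * payload.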
	 */
	if (evsel != trace->syscalls.events.sys_enter)
		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, augmented_args, augmented_args_size, trace, thread);

	if (sc->is_exit) {
		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
			int alignment = 0;

			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
			printed = fprintf(trace->output, "%s)", ttrace->entry_str);
			if (trace->args_alignment > printed)
				alignment = trace->args_alignment - printed;
			fprintf(trace->output, "%*s= ?\n", alignment, " ");
		}
	} else {
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;
	}

	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);
	}
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample)
{
	struct thread_trace *ttrace;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	char msg[1024];
	void *args, *augmented_args = NULL;
	int augmented_args_size;
	size_t printed = 0;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	/*
	 * We need to get ttrace just to make sure it is there when
	 * syscall__scnprintf_args() and the rest of the beautifiers access it
	 * via struct syscall_arg.
	 */
	if (ttrace == NULL)
		goto out_put;

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
	augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	printed += syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
	fprintf(trace->output, "%.*s", (int)printed, msg);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample,
				    struct callchain_cursor *cursor)
{
	struct addr_location al;
	int max_stack = evsel->core.attr.sample_max_stack ?
2730 evsel->core.attr.sample_max_stack : 2731 trace->max_stack; 2732 int err = -1; 2733 2734 addr_location__init(&al); 2735 if (machine__resolve(trace->host, &al, sample) < 0) 2736 goto out; 2737 2738 err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack); 2739 out: 2740 addr_location__exit(&al); 2741 return err; 2742 } 2743 2744 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample) 2745 { 2746 /* TODO: user-configurable print_opts */ 2747 const unsigned int print_opts = EVSEL__PRINT_SYM | 2748 EVSEL__PRINT_DSO | 2749 EVSEL__PRINT_UNKNOWN_AS_ADDR; 2750 2751 return sample__fprintf_callchain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output); 2752 } 2753 2754 static const char *errno_to_name(struct evsel *evsel, int err) 2755 { 2756 struct perf_env *env = evsel__env(evsel); 2757 2758 return perf_env__arch_strerrno(env, err); 2759 } 2760 2761 static int trace__sys_exit(struct trace *trace, struct evsel *evsel, 2762 union perf_event *event __maybe_unused, 2763 struct perf_sample *sample) 2764 { 2765 long ret; 2766 u64 duration = 0; 2767 bool duration_calculated = false; 2768 struct thread *thread; 2769 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0; 2770 int alignment = trace->args_alignment; 2771 struct syscall *sc = trace__syscall_info(trace, evsel, id); 2772 struct thread_trace *ttrace; 2773 2774 if (sc == NULL) 2775 return -1; 2776 2777 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2778 ttrace = thread__trace(thread, trace->output); 2779 if (ttrace == NULL) 2780 goto out_put; 2781 2782 trace__fprintf_sample(trace, evsel, sample, thread); 2783 2784 ret = perf_evsel__sc_tp_uint(evsel, ret, sample); 2785 2786 if (trace->summary) 2787 thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary); 2788 2789 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) { 2790 trace__set_fd_pathname(thread, ret, ttrace->filename.name); 2791 ttrace->filename.pending_open = false; 2792 ++trace->stats.vfs_getname; 2793 } 2794 2795 if (ttrace->entry_time) { 2796 duration = sample->time - ttrace->entry_time; 2797 if (trace__filter_duration(trace, duration)) 2798 goto out; 2799 duration_calculated = true; 2800 } else if (trace->duration_filter) 2801 goto out; 2802 2803 if (sample->callchain) { 2804 struct callchain_cursor *cursor = get_tls_callchain_cursor(); 2805 2806 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); 2807 if (callchain_ret == 0) { 2808 if (cursor->nr < trace->min_stack) 2809 goto out; 2810 callchain_ret = 1; 2811 } 2812 } 2813 2814 if (trace->summary_only || (ret >= 0 && trace->failure_only)) 2815 goto out; 2816 2817 trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output); 2818 2819 if (ttrace->entry_pending) { 2820 printed = fprintf(trace->output, "%s", ttrace->entry_str); 2821 } else { 2822 printed += fprintf(trace->output, " ... 
["); 2823 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued"); 2824 printed += 9; 2825 printed += fprintf(trace->output, "]: %s()", sc->name); 2826 } 2827 2828 printed++; /* the closing ')' */ 2829 2830 if (alignment > printed) 2831 alignment -= printed; 2832 else 2833 alignment = 0; 2834 2835 fprintf(trace->output, ")%*s= ", alignment, " "); 2836 2837 if (sc->fmt == NULL) { 2838 if (ret < 0) 2839 goto errno_print; 2840 signed_print: 2841 fprintf(trace->output, "%ld", ret); 2842 } else if (ret < 0) { 2843 errno_print: { 2844 char bf[STRERR_BUFSIZE]; 2845 const char *emsg = str_error_r(-ret, bf, sizeof(bf)), 2846 *e = errno_to_name(evsel, -ret); 2847 2848 fprintf(trace->output, "-1 %s (%s)", e, emsg); 2849 } 2850 } else if (ret == 0 && sc->fmt->timeout) 2851 fprintf(trace->output, "0 (Timeout)"); 2852 else if (ttrace->ret_scnprintf) { 2853 char bf[1024]; 2854 struct syscall_arg arg = { 2855 .val = ret, 2856 .thread = thread, 2857 .trace = trace, 2858 }; 2859 ttrace->ret_scnprintf(bf, sizeof(bf), &arg); 2860 ttrace->ret_scnprintf = NULL; 2861 fprintf(trace->output, "%s", bf); 2862 } else if (sc->fmt->hexret) 2863 fprintf(trace->output, "%#lx", ret); 2864 else if (sc->fmt->errpid) { 2865 struct thread *child = machine__find_thread(trace->host, ret, ret); 2866 2867 if (child != NULL) { 2868 fprintf(trace->output, "%ld", ret); 2869 if (thread__comm_set(child)) 2870 fprintf(trace->output, " (%s)", thread__comm_str(child)); 2871 thread__put(child); 2872 } 2873 } else 2874 goto signed_print; 2875 2876 fputc('\n', trace->output); 2877 2878 /* 2879 * We only consider an 'event' for the sake of --max-events a non-filtered 2880 * sys_enter + sys_exit and other tracepoint events. 2881 */ 2882 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX) 2883 interrupted = true; 2884 2885 if (callchain_ret > 0) 2886 trace__fprintf_callchain(trace, sample); 2887 else if (callchain_ret < 0) 2888 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 2889 out: 2890 ttrace->entry_pending = false; 2891 err = 0; 2892 out_put: 2893 thread__put(thread); 2894 return err; 2895 } 2896 2897 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel, 2898 union perf_event *event __maybe_unused, 2899 struct perf_sample *sample) 2900 { 2901 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2902 struct thread_trace *ttrace; 2903 size_t filename_len, entry_str_len, to_move; 2904 ssize_t remaining_space; 2905 char *pos; 2906 const char *filename = evsel__rawptr(evsel, sample, "pathname"); 2907 2908 if (!thread) 2909 goto out; 2910 2911 ttrace = thread__priv(thread); 2912 if (!ttrace) 2913 goto out_put; 2914 2915 filename_len = strlen(filename); 2916 if (filename_len == 0) 2917 goto out_put; 2918 2919 if (ttrace->filename.namelen < filename_len) { 2920 char *f = realloc(ttrace->filename.name, filename_len + 1); 2921 2922 if (f == NULL) 2923 goto out_put; 2924 2925 ttrace->filename.namelen = filename_len; 2926 ttrace->filename.name = f; 2927 } 2928 2929 strcpy(ttrace->filename.name, filename); 2930 ttrace->filename.pending_open = true; 2931 2932 if (!ttrace->filename.ptr) 2933 goto out_put; 2934 2935 entry_str_len = strlen(ttrace->entry_str); 2936 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */ 2937 if (remaining_space <= 0) 2938 goto out_put; 2939 2940 if (filename_len > (size_t)remaining_space) { 2941 filename += filename_len - remaining_space; 2942 filename_len = remaining_space; 2943 } 2944 
2945 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */ 2946 pos = ttrace->entry_str + ttrace->filename.entry_str_pos; 2947 memmove(pos + filename_len, pos, to_move); 2948 memcpy(pos, filename, filename_len); 2949 2950 ttrace->filename.ptr = 0; 2951 ttrace->filename.entry_str_pos = 0; 2952 out_put: 2953 thread__put(thread); 2954 out: 2955 return 0; 2956 } 2957 2958 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel, 2959 union perf_event *event __maybe_unused, 2960 struct perf_sample *sample) 2961 { 2962 u64 runtime = evsel__intval(evsel, sample, "runtime"); 2963 double runtime_ms = (double)runtime / NSEC_PER_MSEC; 2964 struct thread *thread = machine__findnew_thread(trace->host, 2965 sample->pid, 2966 sample->tid); 2967 struct thread_trace *ttrace = thread__trace(thread, trace->output); 2968 2969 if (ttrace == NULL) 2970 goto out_dump; 2971 2972 ttrace->runtime_ms += runtime_ms; 2973 trace->runtime_ms += runtime_ms; 2974 out_put: 2975 thread__put(thread); 2976 return 0; 2977 2978 out_dump: 2979 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n", 2980 evsel->name, 2981 evsel__strval(evsel, sample, "comm"), 2982 (pid_t)evsel__intval(evsel, sample, "pid"), 2983 runtime, 2984 evsel__intval(evsel, sample, "vruntime")); 2985 goto out_put; 2986 } 2987 2988 static int bpf_output__printer(enum binary_printer_ops op, 2989 unsigned int val, void *extra __maybe_unused, FILE *fp) 2990 { 2991 unsigned char ch = (unsigned char)val; 2992 2993 switch (op) { 2994 case BINARY_PRINT_CHAR_DATA: 2995 return fprintf(fp, "%c", isprint(ch) ? ch : '.'); 2996 case BINARY_PRINT_DATA_BEGIN: 2997 case BINARY_PRINT_LINE_BEGIN: 2998 case BINARY_PRINT_ADDR: 2999 case BINARY_PRINT_NUM_DATA: 3000 case BINARY_PRINT_NUM_PAD: 3001 case BINARY_PRINT_SEP: 3002 case BINARY_PRINT_CHAR_PAD: 3003 case BINARY_PRINT_LINE_END: 3004 case BINARY_PRINT_DATA_END: 3005 default: 3006 break; 3007 } 3008 3009 return 0; 3010 } 3011 3012 static void bpf_output__fprintf(struct trace *trace, 3013 struct perf_sample *sample) 3014 { 3015 binary__fprintf(sample->raw_data, sample->raw_size, 8, 3016 bpf_output__printer, NULL, trace->output); 3017 ++trace->nr_events_printed; 3018 } 3019 3020 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample, 3021 struct thread *thread, void *augmented_args, int augmented_args_size) 3022 { 3023 char bf[2048]; 3024 size_t size = sizeof(bf); 3025 const struct tep_event *tp_format = evsel__tp_format(evsel); 3026 struct tep_format_field *field = tp_format ? 
tp_format->format.fields : NULL; 3027 struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel); 3028 size_t printed = 0, btf_printed; 3029 unsigned long val; 3030 u8 bit = 1; 3031 struct syscall_arg syscall_arg = { 3032 .augmented = { 3033 .size = augmented_args_size, 3034 .args = augmented_args, 3035 }, 3036 .idx = 0, 3037 .mask = 0, 3038 .trace = trace, 3039 .thread = thread, 3040 .show_string_prefix = trace->show_string_prefix, 3041 }; 3042 3043 for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) { 3044 if (syscall_arg.mask & bit) 3045 continue; 3046 3047 syscall_arg.len = 0; 3048 syscall_arg.fmt = arg; 3049 if (field->flags & TEP_FIELD_IS_ARRAY) { 3050 int offset = field->offset; 3051 3052 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 3053 offset = format_field__intval(field, sample, evsel->needs_swap); 3054 syscall_arg.len = offset >> 16; 3055 offset &= 0xffff; 3056 if (tep_field_is_relative(field->flags)) 3057 offset += field->offset + field->size; 3058 } 3059 3060 val = (uintptr_t)(sample->raw_data + offset); 3061 } else 3062 val = format_field__intval(field, sample, evsel->needs_swap); 3063 /* 3064 * Some syscall args need some mask, most don't and 3065 * return val untouched. 3066 */ 3067 val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val); 3068 3069 /* Suppress this argument if its value is zero and show_zero property isn't set. */ 3070 if (val == 0 && !trace->show_zeros && !arg->show_zero && arg->strtoul != STUL_BTF_TYPE) 3071 continue; 3072 3073 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); 3074 3075 if (trace->show_arg_names) 3076 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); 3077 3078 btf_printed = trace__btf_scnprintf(trace, &syscall_arg, bf + printed, size - printed, val, field->type); 3079 if (btf_printed) { 3080 printed += btf_printed; 3081 continue; 3082 } 3083 3084 printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val); 3085 } 3086 3087 return printed + fprintf(trace->output, "%.*s", (int)printed, bf); 3088 } 3089 3090 static int trace__event_handler(struct trace *trace, struct evsel *evsel, 3091 union perf_event *event __maybe_unused, 3092 struct perf_sample *sample) 3093 { 3094 struct thread *thread; 3095 int callchain_ret = 0; 3096 3097 if (evsel->nr_events_printed >= evsel->max_events) 3098 return 0; 3099 3100 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 3101 3102 if (sample->callchain) { 3103 struct callchain_cursor *cursor = get_tls_callchain_cursor(); 3104 3105 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); 3106 if (callchain_ret == 0) { 3107 if (cursor->nr < trace->min_stack) 3108 goto out; 3109 callchain_ret = 1; 3110 } 3111 } 3112 3113 trace__printf_interrupted_entry(trace); 3114 trace__fprintf_tstamp(trace, sample->time, trace->output); 3115 3116 if (trace->trace_syscalls && trace->show_duration) 3117 fprintf(trace->output, "( ): "); 3118 3119 if (thread) 3120 trace__fprintf_comm_tid(trace, thread, trace->output); 3121 3122 if (evsel == trace->syscalls.events.bpf_output) { 3123 int id = perf_evsel__sc_tp_uint(evsel, id, sample); 3124 struct syscall *sc = trace__syscall_info(trace, evsel, id); 3125 3126 if (sc) { 3127 fprintf(trace->output, "%s(", sc->name); 3128 trace__fprintf_sys_enter(trace, evsel, sample); 3129 fputc(')', trace->output); 3130 goto newline; 3131 } 3132 3133 /* 3134 * XXX: Not having the associated syscall info or not finding/adding 3135 * the thread should 
never happen, but if it does... 3136 * fall thru and print it as a bpf_output event. 3137 */ 3138 } 3139 3140 fprintf(trace->output, "%s(", evsel->name); 3141 3142 if (evsel__is_bpf_output(evsel)) { 3143 bpf_output__fprintf(trace, sample); 3144 } else { 3145 const struct tep_event *tp_format = evsel__tp_format(evsel); 3146 3147 if (tp_format && (strncmp(tp_format->name, "sys_enter_", 10) || 3148 trace__fprintf_sys_enter(trace, evsel, sample))) { 3149 if (trace->libtraceevent_print) { 3150 event_format__fprintf(tp_format, sample->cpu, 3151 sample->raw_data, sample->raw_size, 3152 trace->output); 3153 } else { 3154 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0); 3155 } 3156 } 3157 } 3158 3159 newline: 3160 fprintf(trace->output, ")\n"); 3161 3162 if (callchain_ret > 0) 3163 trace__fprintf_callchain(trace, sample); 3164 else if (callchain_ret < 0) 3165 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 3166 3167 ++trace->nr_events_printed; 3168 3169 if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) { 3170 evsel__disable(evsel); 3171 evsel__close(evsel); 3172 } 3173 out: 3174 thread__put(thread); 3175 return 0; 3176 } 3177 3178 static void print_location(FILE *f, struct perf_sample *sample, 3179 struct addr_location *al, 3180 bool print_dso, bool print_sym) 3181 { 3182 3183 if ((verbose > 0 || print_dso) && al->map) 3184 fprintf(f, "%s@", dso__long_name(map__dso(al->map))); 3185 3186 if ((verbose > 0 || print_sym) && al->sym) 3187 fprintf(f, "%s+0x%" PRIx64, al->sym->name, 3188 al->addr - al->sym->start); 3189 else if (al->map) 3190 fprintf(f, "0x%" PRIx64, al->addr); 3191 else 3192 fprintf(f, "0x%" PRIx64, sample->addr); 3193 } 3194 3195 static int trace__pgfault(struct trace *trace, 3196 struct evsel *evsel, 3197 union perf_event *event __maybe_unused, 3198 struct perf_sample *sample) 3199 { 3200 struct thread *thread; 3201 struct addr_location al; 3202 char map_type = 'd'; 3203 struct thread_trace *ttrace; 3204 int err = -1; 3205 int callchain_ret = 0; 3206 3207 addr_location__init(&al); 3208 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 3209 3210 if (sample->callchain) { 3211 struct callchain_cursor *cursor = get_tls_callchain_cursor(); 3212 3213 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); 3214 if (callchain_ret == 0) { 3215 if (cursor->nr < trace->min_stack) 3216 goto out_put; 3217 callchain_ret = 1; 3218 } 3219 } 3220 3221 ttrace = thread__trace(thread, trace->output); 3222 if (ttrace == NULL) 3223 goto out_put; 3224 3225 if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ) 3226 ttrace->pfmaj++; 3227 else 3228 ttrace->pfmin++; 3229 3230 if (trace->summary_only) 3231 goto out; 3232 3233 thread__find_symbol(thread, sample->cpumode, sample->ip, &al); 3234 3235 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output); 3236 3237 fprintf(trace->output, "%sfault [", 3238 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ? 
3239 "maj" : "min"); 3240 3241 print_location(trace->output, sample, &al, false, true); 3242 3243 fprintf(trace->output, "] => "); 3244 3245 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); 3246 3247 if (!al.map) { 3248 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); 3249 3250 if (al.map) 3251 map_type = 'x'; 3252 else 3253 map_type = '?'; 3254 } 3255 3256 print_location(trace->output, sample, &al, true, false); 3257 3258 fprintf(trace->output, " (%c%c)\n", map_type, al.level); 3259 3260 if (callchain_ret > 0) 3261 trace__fprintf_callchain(trace, sample); 3262 else if (callchain_ret < 0) 3263 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 3264 3265 ++trace->nr_events_printed; 3266 out: 3267 err = 0; 3268 out_put: 3269 thread__put(thread); 3270 addr_location__exit(&al); 3271 return err; 3272 } 3273 3274 static void trace__set_base_time(struct trace *trace, 3275 struct evsel *evsel, 3276 struct perf_sample *sample) 3277 { 3278 /* 3279 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust 3280 * and don't use sample->time unconditionally, we may end up having 3281 * some other event in the future without PERF_SAMPLE_TIME for good 3282 * reason, i.e. we may not be interested in its timestamps, just in 3283 * it taking place, picking some piece of information when it 3284 * appears in our event stream (vfs_getname comes to mind). 3285 */ 3286 if (trace->base_time == 0 && !trace->full_time && 3287 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME)) 3288 trace->base_time = sample->time; 3289 } 3290 3291 static int trace__process_sample(const struct perf_tool *tool, 3292 union perf_event *event, 3293 struct perf_sample *sample, 3294 struct evsel *evsel, 3295 struct machine *machine __maybe_unused) 3296 { 3297 struct trace *trace = container_of(tool, struct trace, tool); 3298 struct thread *thread; 3299 int err = 0; 3300 3301 tracepoint_handler handler = evsel->handler; 3302 3303 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 3304 if (thread && thread__is_filtered(thread)) 3305 goto out; 3306 3307 trace__set_base_time(trace, evsel, sample); 3308 3309 if (handler) { 3310 ++trace->nr_events; 3311 handler(trace, evsel, event, sample); 3312 } 3313 out: 3314 thread__put(thread); 3315 return err; 3316 } 3317 3318 static int trace__record(struct trace *trace, int argc, const char **argv) 3319 { 3320 unsigned int rec_argc, i, j; 3321 const char **rec_argv; 3322 const char * const record_args[] = { 3323 "record", 3324 "-R", 3325 "-m", "1024", 3326 "-c", "1", 3327 }; 3328 pid_t pid = getpid(); 3329 char *filter = asprintf__tp_filter_pids(1, &pid); 3330 const char * const sc_args[] = { "-e", }; 3331 unsigned int sc_args_nr = ARRAY_SIZE(sc_args); 3332 const char * const majpf_args[] = { "-e", "major-faults" }; 3333 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args); 3334 const char * const minpf_args[] = { "-e", "minor-faults" }; 3335 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args); 3336 int err = -1; 3337 3338 /* +3 is for the event string below and the pid filter */ 3339 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 + 3340 majpf_args_nr + minpf_args_nr + argc; 3341 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 3342 3343 if (rec_argv == NULL || filter == NULL) 3344 goto out_free; 3345 3346 j = 0; 3347 for (i = 0; i < ARRAY_SIZE(record_args); i++) 3348 rec_argv[j++] = record_args[i]; 3349 3350 if (trace->trace_syscalls) { 3351 for (i = 0; i < sc_args_nr; i++) 3352 rec_argv[j++] = sc_args[i]; 3353 
3354 /* event string may be different for older kernels - e.g., RHEL6 */ 3355 if (is_valid_tracepoint("raw_syscalls:sys_enter")) 3356 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit"; 3357 else if (is_valid_tracepoint("syscalls:sys_enter")) 3358 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit"; 3359 else { 3360 pr_err("Neither raw_syscalls nor syscalls events exist.\n"); 3361 goto out_free; 3362 } 3363 } 3364 3365 rec_argv[j++] = "--filter"; 3366 rec_argv[j++] = filter; 3367 3368 if (trace->trace_pgfaults & TRACE_PFMAJ) 3369 for (i = 0; i < majpf_args_nr; i++) 3370 rec_argv[j++] = majpf_args[i]; 3371 3372 if (trace->trace_pgfaults & TRACE_PFMIN) 3373 for (i = 0; i < minpf_args_nr; i++) 3374 rec_argv[j++] = minpf_args[i]; 3375 3376 for (i = 0; i < (unsigned int)argc; i++) 3377 rec_argv[j++] = argv[i]; 3378 3379 err = cmd_record(j, rec_argv); 3380 out_free: 3381 free(filter); 3382 free(rec_argv); 3383 return err; 3384 } 3385 3386 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp); 3387 3388 static bool evlist__add_vfs_getname(struct evlist *evlist) 3389 { 3390 bool found = false; 3391 struct evsel *evsel, *tmp; 3392 struct parse_events_error err; 3393 int ret; 3394 3395 parse_events_error__init(&err); 3396 ret = parse_events(evlist, "probe:vfs_getname*", &err); 3397 parse_events_error__exit(&err); 3398 if (ret) 3399 return false; 3400 3401 evlist__for_each_entry_safe(evlist, evsel, tmp) { 3402 if (!strstarts(evsel__name(evsel), "probe:vfs_getname")) 3403 continue; 3404 3405 if (evsel__field(evsel, "pathname")) { 3406 evsel->handler = trace__vfs_getname; 3407 found = true; 3408 continue; 3409 } 3410 3411 list_del_init(&evsel->core.node); 3412 evsel->evlist = NULL; 3413 evsel__delete(evsel); 3414 } 3415 3416 return found; 3417 } 3418 3419 static struct evsel *evsel__new_pgfault(u64 config) 3420 { 3421 struct evsel *evsel; 3422 struct perf_event_attr attr = { 3423 .type = PERF_TYPE_SOFTWARE, 3424 .mmap_data = 1, 3425 }; 3426 3427 attr.config = config; 3428 attr.sample_period = 1; 3429 3430 event_attr_init(&attr); 3431 3432 evsel = evsel__new(&attr); 3433 if (evsel) 3434 evsel->handler = trace__pgfault; 3435 3436 return evsel; 3437 } 3438 3439 static void evlist__free_syscall_tp_fields(struct evlist *evlist) 3440 { 3441 struct evsel *evsel; 3442 3443 evlist__for_each_entry(evlist, evsel) { 3444 evsel_trace__delete(evsel->priv); 3445 evsel->priv = NULL; 3446 } 3447 } 3448 3449 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample) 3450 { 3451 const u32 type = event->header.type; 3452 struct evsel *evsel; 3453 3454 if (type != PERF_RECORD_SAMPLE) { 3455 trace__process_event(trace, trace->host, event, sample); 3456 return; 3457 } 3458 3459 evsel = evlist__id2evsel(trace->evlist, sample->id); 3460 if (evsel == NULL) { 3461 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id); 3462 return; 3463 } 3464 3465 if (evswitch__discard(&trace->evswitch, evsel)) 3466 return; 3467 3468 trace__set_base_time(trace, evsel, sample); 3469 3470 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && 3471 sample->raw_data == NULL) { 3472 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n", 3473 evsel__name(evsel), sample->tid, 3474 sample->cpu, sample->raw_size); 3475 } else { 3476 tracepoint_handler handler = evsel->handler; 3477 handler(trace, evsel, event, sample); 3478 } 3479 3480 if (trace->nr_events_printed >= trace->max_events && trace->max_events 
!= ULONG_MAX) 3481 interrupted = true; 3482 } 3483 3484 static int trace__add_syscall_newtp(struct trace *trace) 3485 { 3486 int ret = -1; 3487 struct evlist *evlist = trace->evlist; 3488 struct evsel *sys_enter, *sys_exit; 3489 3490 sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter); 3491 if (sys_enter == NULL) 3492 goto out; 3493 3494 if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args)) 3495 goto out_delete_sys_enter; 3496 3497 sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit); 3498 if (sys_exit == NULL) 3499 goto out_delete_sys_enter; 3500 3501 if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret)) 3502 goto out_delete_sys_exit; 3503 3504 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param); 3505 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param); 3506 3507 evlist__add(evlist, sys_enter); 3508 evlist__add(evlist, sys_exit); 3509 3510 if (callchain_param.enabled && !trace->kernel_syscallchains) { 3511 /* 3512 * We're interested only in the user space callchain 3513 * leading to the syscall, allow overriding that for 3514 * debugging reasons using --kernel_syscall_callchains 3515 */ 3516 sys_exit->core.attr.exclude_callchain_kernel = 1; 3517 } 3518 3519 trace->syscalls.events.sys_enter = sys_enter; 3520 trace->syscalls.events.sys_exit = sys_exit; 3521 3522 ret = 0; 3523 out: 3524 return ret; 3525 3526 out_delete_sys_exit: 3527 evsel__delete_priv(sys_exit); 3528 out_delete_sys_enter: 3529 evsel__delete_priv(sys_enter); 3530 goto out; 3531 } 3532 3533 static int trace__set_ev_qualifier_tp_filter(struct trace *trace) 3534 { 3535 int err = -1; 3536 struct evsel *sys_exit; 3537 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier, 3538 trace->ev_qualifier_ids.nr, 3539 trace->ev_qualifier_ids.entries); 3540 3541 if (filter == NULL) 3542 goto out_enomem; 3543 3544 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) { 3545 sys_exit = trace->syscalls.events.sys_exit; 3546 err = evsel__append_tp_filter(sys_exit, filter); 3547 } 3548 3549 free(filter); 3550 out: 3551 return err; 3552 out_enomem: 3553 errno = ENOMEM; 3554 goto out; 3555 } 3556 3557 #ifdef HAVE_BPF_SKEL 3558 static int syscall_arg_fmt__cache_btf_struct(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type) 3559 { 3560 int id; 3561 3562 if (arg_fmt->type != NULL) 3563 return -1; 3564 3565 id = btf__find_by_name(btf, type); 3566 if (id < 0) 3567 return -1; 3568 3569 arg_fmt->type = btf__type_by_id(btf, id); 3570 arg_fmt->type_id = id; 3571 3572 return 0; 3573 } 3574 3575 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name) 3576 { 3577 struct bpf_program *pos, *prog = NULL; 3578 const char *sec_name; 3579 3580 if (trace->skel->obj == NULL) 3581 return NULL; 3582 3583 bpf_object__for_each_program(pos, trace->skel->obj) { 3584 sec_name = bpf_program__section_name(pos); 3585 if (sec_name && !strcmp(sec_name, name)) { 3586 prog = pos; 3587 break; 3588 } 3589 } 3590 3591 return prog; 3592 } 3593 3594 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc, 3595 const char *prog_name, const char *type) 3596 { 3597 struct bpf_program *prog; 3598 3599 if (prog_name == NULL) { 3600 char default_prog_name[256]; 3601 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name); 3602 prog = trace__find_bpf_program_by_title(trace, default_prog_name); 3603 if (prog != NULL) 3604 goto out_found; 3605 if (sc->fmt && 
sc->fmt->alias) { 3606 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias); 3607 prog = trace__find_bpf_program_by_title(trace, default_prog_name); 3608 if (prog != NULL) 3609 goto out_found; 3610 } 3611 goto out_unaugmented; 3612 } 3613 3614 prog = trace__find_bpf_program_by_title(trace, prog_name); 3615 3616 if (prog != NULL) { 3617 out_found: 3618 return prog; 3619 } 3620 3621 pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n", 3622 prog_name, type, sc->name); 3623 out_unaugmented: 3624 return trace->skel->progs.syscall_unaugmented; 3625 } 3626 3627 static void trace__init_syscall_bpf_progs(struct trace *trace, int id) 3628 { 3629 struct syscall *sc = trace__syscall_info(trace, NULL, id); 3630 3631 if (sc == NULL) 3632 return; 3633 3634 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter"); 3635 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit"); 3636 } 3637 3638 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id) 3639 { 3640 struct syscall *sc = trace__syscall_info(trace, NULL, id); 3641 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented); 3642 } 3643 3644 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id) 3645 { 3646 struct syscall *sc = trace__syscall_info(trace, NULL, id); 3647 return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented); 3648 } 3649 3650 static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int key, unsigned int *beauty_array) 3651 { 3652 struct tep_format_field *field; 3653 struct syscall *sc = trace__syscall_info(trace, NULL, key); 3654 const struct btf_type *bt; 3655 char *struct_offset, *tmp, name[32]; 3656 bool can_augment = false; 3657 int i, cnt; 3658 3659 if (sc == NULL) 3660 return -1; 3661 3662 trace__load_vmlinux_btf(trace); 3663 if (trace->btf == NULL) 3664 return -1; 3665 3666 for (i = 0, field = sc->args; field; ++i, field = field->next) { 3667 // XXX We're only collecting pointer payloads _from_ user space 3668 if (!sc->arg_fmt[i].from_user) 3669 continue; 3670 3671 struct_offset = strstr(field->type, "struct "); 3672 if (struct_offset == NULL) 3673 struct_offset = strstr(field->type, "union "); 3674 else 3675 struct_offset++; // "union" is shorter 3676 3677 if (field->flags & TEP_FIELD_IS_POINTER && struct_offset) { /* struct or union (think BPF's attr arg) */ 3678 struct_offset += 6; 3679 3680 /* for 'struct foo *', we only want 'foo' */ 3681 for (tmp = struct_offset, cnt = 0; *tmp != ' ' && *tmp != '\0'; ++tmp, ++cnt) { 3682 } 3683 3684 strncpy(name, struct_offset, cnt); 3685 name[cnt] = '\0'; 3686 3687 /* cache struct's btf_type and type_id */ 3688 if (syscall_arg_fmt__cache_btf_struct(&sc->arg_fmt[i], trace->btf, name)) 3689 continue; 3690 3691 bt = sc->arg_fmt[i].type; 3692 beauty_array[i] = bt->size; 3693 can_augment = true; 3694 } else if (field->flags & TEP_FIELD_IS_POINTER && /* string */ 3695 strcmp(field->type, "const char *") == 0 && 3696 (strstr(field->name, "name") || 3697 strstr(field->name, "path") || 3698 strstr(field->name, "file") || 3699 strstr(field->name, "root") || 3700 strstr(field->name, "key") || 3701 strstr(field->name, "special") || 3702 strstr(field->name, "type") || 3703 strstr(field->name, "description"))) { 3704 beauty_array[i] = 1; 
3705 can_augment = true; 3706 } else if (field->flags & TEP_FIELD_IS_POINTER && /* buffer */ 3707 strstr(field->type, "char *") && 3708 (strstr(field->name, "buf") || 3709 strstr(field->name, "val") || 3710 strstr(field->name, "msg"))) { 3711 int j; 3712 struct tep_format_field *field_tmp; 3713 3714 /* find the size of the buffer that appears in pairs with buf */ 3715 for (j = 0, field_tmp = sc->args; field_tmp; ++j, field_tmp = field_tmp->next) { 3716 if (!(field_tmp->flags & TEP_FIELD_IS_POINTER) && /* only integers */ 3717 (strstr(field_tmp->name, "count") || 3718 strstr(field_tmp->name, "siz") || /* size, bufsiz */ 3719 (strstr(field_tmp->name, "len") && strcmp(field_tmp->name, "filename")))) { 3720 /* filename's got 'len' in it, we don't want that */ 3721 beauty_array[i] = -(j + 1); 3722 can_augment = true; 3723 break; 3724 } 3725 } 3726 } 3727 } 3728 3729 if (can_augment) 3730 return 0; 3731 3732 return -1; 3733 } 3734 3735 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc) 3736 { 3737 struct tep_format_field *field, *candidate_field; 3738 /* 3739 * We're only interested in syscalls that have a pointer: 3740 */ 3741 for (field = sc->args; field; field = field->next) { 3742 if (field->flags & TEP_FIELD_IS_POINTER) 3743 goto try_to_find_pair; 3744 } 3745 3746 return NULL; 3747 3748 try_to_find_pair: 3749 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) { 3750 int id = syscalltbl__id_at_idx(trace->sctbl, i); 3751 struct syscall *pair = trace__syscall_info(trace, NULL, id); 3752 struct bpf_program *pair_prog; 3753 bool is_candidate = false; 3754 3755 if (pair == NULL || pair == sc || 3756 pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented) 3757 continue; 3758 3759 for (field = sc->args, candidate_field = pair->args; 3760 field && candidate_field; field = field->next, candidate_field = candidate_field->next) { 3761 bool is_pointer = field->flags & TEP_FIELD_IS_POINTER, 3762 candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER; 3763 3764 if (is_pointer) { 3765 if (!candidate_is_pointer) { 3766 // The candidate just doesn't copy our pointer arg, but it might copy other pointers we want. 3767 continue; 3768 } 3769 } else { 3770 if (candidate_is_pointer) { 3771 // The candidate might copy a pointer we don't have, skip it. 3772 goto next_candidate; 3773 } 3774 continue; 3775 } 3776 3777 if (strcmp(field->type, candidate_field->type)) 3778 goto next_candidate; 3779 3780 /* 3781 * This is limited in the BPF program but sys_write 3782 * uses "const char *" for its "buf" arg so we need to 3783 * use some heuristic that is kinda future proof... 3784 */ 3785 if (strcmp(field->type, "const char *") == 0 && 3786 !(strstr(field->name, "name") || 3787 strstr(field->name, "path") || 3788 strstr(field->name, "file") || 3789 strstr(field->name, "root") || 3790 strstr(field->name, "description"))) 3791 goto next_candidate; 3792 3793 is_candidate = true; 3794 } 3795 3796 if (!is_candidate) 3797 goto next_candidate; 3798 3799 /* 3800 * Check if the tentative pair syscall augmenter has more pointers; if it has, 3801 * then it may be collecting those and we can't use it, as it would collect 3802 * more than what is common to the two syscalls.
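 *
 * E.g. (illustrative): a single-string syscall shouldn't reuse an
 * augmenter that also collects a struct pointer further down its arg
 * list, as that extra payload means nothing for the shorter signature.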
3803 */ 3804 if (candidate_field) { 3805 for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next) 3806 if (candidate_field->flags & TEP_FIELD_IS_POINTER) 3807 goto next_candidate; 3808 } 3809 3810 pair_prog = pair->bpf_prog.sys_enter; 3811 /* 3812 * If the pair isn't enabled, then its bpf_prog.sys_enter will not 3813 * have been searched for, so search it here and if it returns the 3814 * unaugmented one, then ignore it, otherwise we'll reuse that BPF 3815 * program for a filtered syscall on a non-filtered one. 3816 * 3817 * For instance, we have "!syscalls:sys_enter_renameat" and that is 3818 * useful for "renameat2". 3819 */ 3820 if (pair_prog == NULL) { 3821 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter"); 3822 if (pair_prog == trace->skel->progs.syscall_unaugmented) 3823 goto next_candidate; 3824 } 3825 3826 pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name); 3827 return pair_prog; 3828 next_candidate: 3829 continue; 3830 } 3831 3832 return NULL; 3833 } 3834 3835 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace) 3836 { 3837 int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter); 3838 int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit); 3839 int beauty_map_fd = bpf_map__fd(trace->skel->maps.beauty_map_enter); 3840 int err = 0; 3841 unsigned int beauty_array[6]; 3842 3843 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) { 3844 int prog_fd, key = syscalltbl__id_at_idx(trace->sctbl, i); 3845 3846 if (!trace__syscall_enabled(trace, key)) 3847 continue; 3848 3849 trace__init_syscall_bpf_progs(trace, key); 3850 3851 // It'll get at least the "!raw_syscalls:unaugmented" 3852 prog_fd = trace__bpf_prog_sys_enter_fd(trace, key); 3853 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY); 3854 if (err) 3855 break; 3856 prog_fd = trace__bpf_prog_sys_exit_fd(trace, key); 3857 err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY); 3858 if (err) 3859 break; 3860 3861 /* use beauty_map to tell BPF how many bytes to collect, set beauty_map's value here */ 3862 memset(beauty_array, 0, sizeof(beauty_array)); 3863 err = trace__bpf_sys_enter_beauty_map(trace, key, (unsigned int *)beauty_array); 3864 if (err) 3865 continue; 3866 err = bpf_map_update_elem(beauty_map_fd, &key, beauty_array, BPF_ANY); 3867 if (err) 3868 break; 3869 } 3870 3871 /* 3872 * Now let's do a second pass looking for enabled syscalls without 3873 * an augmenter that have a signature that is a superset of another 3874 * syscall with an augmenter so that we can auto-reuse it. 3875 * 3876 * I.e. if we have an augmenter for the "open" syscall that has 3877 * this signature: 3878 * 3879 * int open(const char *pathname, int flags, mode_t mode); 3880 * 3881 * i.e. one that will collect just the first string argument, then we 3882 * can reuse it for the 'creat' syscall, which has this signature: 3883 * 3884 * int creat(const char *pathname, mode_t mode); 3885 * 3886 * and for: 3887 * 3888 * int stat(const char *pathname, struct stat *statbuf); 3889 * int lstat(const char *pathname, struct stat *statbuf); 3890 * 3891 * because the 'open' augmenter will collect the first arg as a string 3892 * and leave all the other args alone, which already helps with 3893 * beautifying 'stat' and 'lstat's pathname arg.
3894 * 3895 * Then, in time, when 'stat' gets an augmenter that collects both 3896 * the first and second args (this one via the raw_syscalls:sys_exit prog 3897 * array tail call), then that one will be used. 3898 */ 3899 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) { 3900 int key = syscalltbl__id_at_idx(trace->sctbl, i); 3901 struct syscall *sc = trace__syscall_info(trace, NULL, key); 3902 struct bpf_program *pair_prog; 3903 int prog_fd; 3904 3905 if (sc == NULL || sc->bpf_prog.sys_enter == NULL) 3906 continue; 3907 3908 /* 3909 * For now we're just reusing the sys_enter prog, and if it 3910 * already has an augmenter, we don't need to find one. 3911 */ 3912 if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented) 3913 continue; 3914 3915 /* 3916 * Look at all the other syscalls for one that has a signature 3917 * that is close enough that we can share: 3918 */ 3919 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc); 3920 if (pair_prog == NULL) 3921 continue; 3922 3923 sc->bpf_prog.sys_enter = pair_prog; 3924 3925 /* 3926 * Update the BPF_MAP_TYPE_PROG_ARRAY for raw_syscalls:sys_enter 3927 * with the fd for the program we're reusing: 3928 */ 3929 prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter); 3930 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY); 3931 if (err) 3932 break; 3933 } 3934 3935 return err; 3936 } 3937 #endif // HAVE_BPF_SKEL 3938 3939 static int trace__set_ev_qualifier_filter(struct trace *trace) 3940 { 3941 if (trace->syscalls.events.sys_enter) 3942 return trace__set_ev_qualifier_tp_filter(trace); 3943 return 0; 3944 } 3945 3946 static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused, 3947 size_t npids __maybe_unused, pid_t *pids __maybe_unused) 3948 { 3949 int err = 0; 3950 #ifdef HAVE_LIBBPF_SUPPORT 3951 bool value = true; 3952 int map_fd = bpf_map__fd(map); 3953 size_t i; 3954 3955 for (i = 0; i < npids; ++i) { 3956 err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY); 3957 if (err) 3958 break; 3959 } 3960 #endif 3961 return err; 3962 } 3963 3964 static int trace__set_filter_loop_pids(struct trace *trace) 3965 { 3966 unsigned int nr = 1, err; 3967 pid_t pids[32] = { 3968 getpid(), 3969 }; 3970 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]); 3971 3972 while (thread && nr < ARRAY_SIZE(pids)) { 3973 struct thread *parent = machine__find_thread(trace->host, 3974 thread__ppid(thread), 3975 thread__ppid(thread)); 3976 3977 if (parent == NULL) 3978 break; 3979 3980 if (!strcmp(thread__comm_str(parent), "sshd") || 3981 strstarts(thread__comm_str(parent), "gnome-terminal")) { 3982 pids[nr++] = thread__tid(parent); 3983 break; 3984 } 3985 thread = parent; 3986 } 3987 3988 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids); 3989 if (!err && trace->filter_pids.map) 3990 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids); 3991 3992 return err; 3993 } 3994 3995 static int trace__set_filter_pids(struct trace *trace) 3996 { 3997 int err = 0; 3998 /* 3999 * Better not use !target__has_task() here because we need to cover the 4000 * case where no threads were specified in the command line, but a 4001 * workload was, and in that case we will fill in the thread_map when 4002 * we fork the workload in evlist__prepare_workload.
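 *
 * E.g. for "perf trace sleep 1": no -p/-t was given, but by this point
 * the forked workload's pid is already in the thread_map, so we
 * correctly skip the trace__set_filter_loop_pids() fallback below.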
4003 */ 4004 if (trace->filter_pids.nr > 0) { 4005 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr, 4006 trace->filter_pids.entries); 4007 if (!err && trace->filter_pids.map) { 4008 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr, 4009 trace->filter_pids.entries); 4010 } 4011 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) { 4012 err = trace__set_filter_loop_pids(trace); 4013 } 4014 4015 return err; 4016 } 4017 4018 static int __trace__deliver_event(struct trace *trace, union perf_event *event) 4019 { 4020 struct evlist *evlist = trace->evlist; 4021 struct perf_sample sample; 4022 int err = evlist__parse_sample(evlist, event, &sample); 4023 4024 if (err) 4025 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err); 4026 else 4027 trace__handle_event(trace, event, &sample); 4028 4029 return 0; 4030 } 4031 4032 static int __trace__flush_events(struct trace *trace) 4033 { 4034 u64 first = ordered_events__first_time(&trace->oe.data); 4035 u64 flush = trace->oe.last - NSEC_PER_SEC; 4036 4037 /* Is there something to flush? */ 4038 if (first && first < flush) 4039 return ordered_events__flush_time(&trace->oe.data, flush); 4040 4041 return 0; 4042 } 4043 4044 static int trace__flush_events(struct trace *trace) 4045 { 4046 return !trace->sort_events ? 0 : __trace__flush_events(trace); 4047 } 4048 4049 static int trace__deliver_event(struct trace *trace, union perf_event *event) 4050 { 4051 int err; 4052 4053 if (!trace->sort_events) 4054 return __trace__deliver_event(trace, event); 4055 4056 err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last); 4057 if (err && err != -1) 4058 return err; 4059 4060 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL); 4061 if (err) 4062 return err; 4063 4064 return trace__flush_events(trace); 4065 } 4066 4067 static int ordered_events__deliver_event(struct ordered_events *oe, 4068 struct ordered_event *event) 4069 { 4070 struct trace *trace = container_of(oe, struct trace, oe.data); 4071 4072 return __trace__deliver_event(trace, event->event); 4073 } 4074 4075 static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg, 4076 char **type) 4077 { 4078 struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel); 4079 const struct tep_event *tp_format; 4080 4081 if (!fmt) 4082 return NULL; 4083 4084 tp_format = evsel__tp_format(evsel); 4085 if (!tp_format) 4086 return NULL; 4087 4088 for (const struct tep_format_field *field = tp_format->format.fields; field; 4089 field = field->next, ++fmt) { 4090 if (strcmp(field->name, arg) == 0) { 4091 *type = field->type; 4092 return fmt; 4093 } 4094 } 4095 4096 return NULL; 4097 } 4098 4099 static int trace__expand_filter(struct trace *trace, struct evsel *evsel) 4100 { 4101 char *tok, *left = evsel->filter, *new_filter = evsel->filter; 4102 4103 while ((tok = strpbrk(left, "=<>!")) != NULL) { 4104 char *right = tok + 1, *right_end; 4105 4106 if (*right == '=') 4107 ++right; 4108 4109 while (isspace(*right)) 4110 ++right; 4111 4112 if (*right == '\0') 4113 break; 4114 4115 while (!isalpha(*left)) 4116 if (++left == tok) { 4117 /* 4118 * Bail out, we can't find the name of the argument that is being 4119 * used in the filter; let it try to set this filter, it will fail later.
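 *
 * E.g. a (hypothetical) filter like "3 == id" has no argument name to
 * the left of the operator, so there is nothing for us to expand.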
4120 */ 4121 return 0; 4122 } 4123 4124 right_end = right + 1; 4125 while (isalnum(*right_end) || *right_end == '_' || *right_end == '|') 4126 ++right_end; 4127 4128 if (isalpha(*right)) { 4129 struct syscall_arg_fmt *fmt; 4130 int left_size = tok - left, 4131 right_size = right_end - right; 4132 char arg[128], *type; 4133 4134 while (isspace(left[left_size - 1])) 4135 --left_size; 4136 4137 scnprintf(arg, sizeof(arg), "%.*s", left_size, left); 4138 4139 fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg, &type); 4140 if (fmt == NULL) { 4141 pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n", 4142 arg, evsel->name, evsel->filter); 4143 return -1; 4144 } 4145 4146 pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ", 4147 arg, (int)(right - tok), tok, right_size, right); 4148 4149 if (fmt->strtoul) { 4150 u64 val; 4151 struct syscall_arg syscall_arg = { 4152 .trace = trace, 4153 .fmt = fmt, 4154 .type_name = type, 4155 .parm = fmt->parm, 4156 }; 4157 4158 if (fmt->strtoul(right, right_size, &syscall_arg, &val)) { 4159 char *n, expansion[19]; 4160 int expansion_length = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val); 4161 int expansion_offset = right - new_filter; 4162 4163 pr_debug("%s", expansion); 4164 4165 if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) { 4166 pr_debug(" out of memory!\n"); 4167 free(new_filter); 4168 return -1; 4169 } 4170 if (new_filter != evsel->filter) 4171 free(new_filter); 4172 left = n + expansion_offset + expansion_length; 4173 new_filter = n; 4174 } else { 4175 pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n", 4176 right_size, right, arg, evsel->name, evsel->filter); 4177 return -1; 4178 } 4179 } else { 4180 pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n", 4181 arg, evsel->name, evsel->filter); 4182 return -1; 4183 } 4184 4185 pr_debug("\n"); 4186 } else { 4187 left = right_end; 4188 } 4189 } 4190 4191 if (new_filter != evsel->filter) { 4192 pr_debug("New filter for %s: %s\n", evsel->name, new_filter); 4193 evsel__set_filter(evsel, new_filter); 4194 free(new_filter); 4195 } 4196 4197 return 0; 4198 } 4199 4200 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel) 4201 { 4202 struct evlist *evlist = trace->evlist; 4203 struct evsel *evsel; 4204 4205 evlist__for_each_entry(evlist, evsel) { 4206 if (evsel->filter == NULL) 4207 continue; 4208 4209 if (trace__expand_filter(trace, evsel)) { 4210 *err_evsel = evsel; 4211 return -1; 4212 } 4213 } 4214 4215 return 0; 4216 } 4217 4218 static int trace__run(struct trace *trace, int argc, const char **argv) 4219 { 4220 struct evlist *evlist = trace->evlist; 4221 struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL; 4222 int err = -1, i; 4223 unsigned long before; 4224 const bool forks = argc > 0; 4225 bool draining = false; 4226 4227 trace->live = true; 4228 4229 if (!trace->raw_augmented_syscalls) { 4230 if (trace->trace_syscalls && trace__add_syscall_newtp(trace)) 4231 goto out_error_raw_syscalls; 4232 4233 if (trace->trace_syscalls) 4234 trace->vfs_getname = evlist__add_vfs_getname(evlist); 4235 } 4236 4237 if ((trace->trace_pgfaults & TRACE_PFMAJ)) { 4238 pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ); 4239 if (pgfault_maj == NULL) 4240 goto out_error_mem; 4241 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param); 4242 evlist__add(evlist, pgfault_maj); 4243 } 4244 4245 if ((trace->trace_pgfaults & TRACE_PFMIN)) { 4246 pgfault_min
= evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN); 4247 if (pgfault_min == NULL) 4248 goto out_error_mem; 4249 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param); 4250 evlist__add(evlist, pgfault_min); 4251 } 4252 4253 /* Enable ignoring missing threads when -u/-p option is defined. */ 4254 trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid; 4255 4256 if (trace->sched && 4257 evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime)) 4258 goto out_error_sched_stat_runtime; 4259 /* 4260 * If a global cgroup was set, apply it to all the events without an 4261 * explicit cgroup. I.e.: 4262 * 4263 * trace -G A -e sched:*switch 4264 * 4265 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc. 4266 * _and_ sched:sched_switch to the 'A' cgroup, while: 4267 * 4268 * trace -e sched:*switch -G A 4269 * 4270 * will only set the sched:sched_switch event to the 'A' cgroup, all the 4271 * other events (raw_syscalls:sys_{enter,exit}, etc.) are left "without" 4272 * a cgroup (on the root cgroup, system wide, etc.). 4273 * 4274 * Multiple cgroups: 4275 * 4276 * trace -G A -e sched:*switch -G B 4277 * 4278 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes 4279 * to the 'B' cgroup. 4280 * 4281 * evlist__set_default_cgroup() grabs a reference of the passed cgroup 4282 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL. 4283 */ 4284 if (trace->cgroup) 4285 evlist__set_default_cgroup(trace->evlist, trace->cgroup); 4286 4287 err = evlist__create_maps(evlist, &trace->opts.target); 4288 if (err < 0) { 4289 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n"); 4290 goto out_delete_evlist; 4291 } 4292 4293 err = trace__symbols_init(trace, evlist); 4294 if (err < 0) { 4295 fprintf(trace->output, "Problems initializing symbol libraries!\n"); 4296 goto out_delete_evlist; 4297 } 4298 4299 evlist__config(evlist, &trace->opts, &callchain_param); 4300 4301 if (forks) { 4302 err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL); 4303 if (err < 0) { 4304 fprintf(trace->output, "Couldn't run the workload!\n"); 4305 goto out_delete_evlist; 4306 } 4307 workload_pid = evlist->workload.pid; 4308 } 4309 4310 err = evlist__open(evlist); 4311 if (err < 0) 4312 goto out_error_open; 4313 #ifdef HAVE_BPF_SKEL 4314 if (trace->syscalls.events.bpf_output) { 4315 struct perf_cpu cpu; 4316 4317 /* 4318 * Set up the __augmented_syscalls__ BPF map to hold for each 4319 * CPU the bpf-output event's file descriptor.
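 *
 * (A sketch of the consumer side: the BPF programs emit their records
 * with bpf_perf_event_output() and BPF_F_CURRENT_CPU, which indexes
 * this PERF_EVENT_ARRAY map by the running CPU, so each record lands
 * in that CPU's ring buffer.)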
4320 */ 4321 perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) { 4322 bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__, 4323 &cpu.cpu, sizeof(int), 4324 xyarray__entry(trace->syscalls.events.bpf_output->core.fd, 4325 cpu.cpu, 0), 4326 sizeof(__u32), BPF_ANY); 4327 } 4328 } 4329 4330 if (trace->skel) 4331 trace->filter_pids.map = trace->skel->maps.pids_filtered; 4332 #endif 4333 err = trace__set_filter_pids(trace); 4334 if (err < 0) 4335 goto out_error_mem; 4336 4337 #ifdef HAVE_BPF_SKEL 4338 if (trace->skel && trace->skel->progs.sys_enter) 4339 trace__init_syscalls_bpf_prog_array_maps(trace); 4340 #endif 4341 4342 if (trace->ev_qualifier_ids.nr > 0) { 4343 err = trace__set_ev_qualifier_filter(trace); 4344 if (err < 0) 4345 goto out_errno; 4346 4347 if (trace->syscalls.events.sys_exit) { 4348 pr_debug("event qualifier tracepoint filter: %s\n", 4349 trace->syscalls.events.sys_exit->filter); 4350 } 4351 } 4352 4353 /* 4354 * If the "close" syscall is not traced, then we will not have the 4355 * opportunity to, in syscall_arg__scnprintf_close_fd(), invalidate the 4356 * fd->pathname table, and we'd end up showing the last value set by 4357 * syscalls opening a pathname and associating it with a descriptor or 4358 * reading it from /proc/pid/fd/ in cases where that doesn't make 4359 * sense. 4360 * 4361 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is 4362 * not in use. 4363 */ 4364 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close")); 4365 4366 err = trace__expand_filters(trace, &evsel); 4367 if (err) 4368 goto out_delete_evlist; 4369 err = evlist__apply_filters(evlist, &evsel, &trace->opts.target); 4370 if (err < 0) 4371 goto out_error_apply_filters; 4372 4373 err = evlist__mmap(evlist, trace->opts.mmap_pages); 4374 if (err < 0) 4375 goto out_error_mmap; 4376 4377 if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay) 4378 evlist__enable(evlist); 4379 4380 if (forks) 4381 evlist__start_workload(evlist); 4382 4383 if (trace->opts.target.initial_delay) { 4384 usleep(trace->opts.target.initial_delay * 1000); 4385 evlist__enable(evlist); 4386 } 4387 4388 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 || 4389 perf_thread_map__nr(evlist->core.threads) > 1 || 4390 evlist__first(evlist)->core.attr.inherit; 4391 4392 /* 4393 * Now that we already used evsel->core.attr to ask the kernel to setup the 4394 * events, let's reuse evsel->core.attr.sample_max_stack as the limit in 4395 * trace__resolve_callchain(), allowing per-event max-stack settings 4396 * to override an explicitly set --max-stack global setting.
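 *
 * E.g. (illustrative, assuming the per-event "max-stack" term):
 * "--max-stack 16 -e probe:foo/max-stack=2/" keeps the per-event
 * limit of 2 for probe:foo below.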
4397 */ 4398 evlist__for_each_entry(evlist, evsel) { 4399 if (evsel__has_callchain(evsel) && 4400 evsel->core.attr.sample_max_stack == 0) 4401 evsel->core.attr.sample_max_stack = trace->max_stack; 4402 } 4403 again: 4404 before = trace->nr_events; 4405 4406 for (i = 0; i < evlist->core.nr_mmaps; i++) { 4407 union perf_event *event; 4408 struct mmap *md; 4409 4410 md = &evlist->mmap[i]; 4411 if (perf_mmap__read_init(&md->core) < 0) 4412 continue; 4413 4414 while ((event = perf_mmap__read_event(&md->core)) != NULL) { 4415 ++trace->nr_events; 4416 4417 err = trace__deliver_event(trace, event); 4418 if (err) 4419 goto out_disable; 4420 4421 perf_mmap__consume(&md->core); 4422 4423 if (interrupted) 4424 goto out_disable; 4425 4426 if (done && !draining) { 4427 evlist__disable(evlist); 4428 draining = true; 4429 } 4430 } 4431 perf_mmap__read_done(&md->core); 4432 } 4433 4434 if (trace->nr_events == before) { 4435 int timeout = done ? 100 : -1; 4436 4437 if (!draining && evlist__poll(evlist, timeout) > 0) { 4438 if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0) 4439 draining = true; 4440 4441 goto again; 4442 } else { 4443 if (trace__flush_events(trace)) 4444 goto out_disable; 4445 } 4446 } else { 4447 goto again; 4448 } 4449 4450 out_disable: 4451 thread__zput(trace->current); 4452 4453 evlist__disable(evlist); 4454 4455 if (trace->sort_events) 4456 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL); 4457 4458 if (!err) { 4459 if (trace->summary) 4460 trace__fprintf_thread_summary(trace, trace->output); 4461 4462 if (trace->show_tool_stats) { 4463 fprintf(trace->output, "Stats:\n " 4464 " vfs_getname : %" PRIu64 "\n" 4465 " proc_getname: %" PRIu64 "\n", 4466 trace->stats.vfs_getname, 4467 trace->stats.proc_getname); 4468 } 4469 } 4470 4471 out_delete_evlist: 4472 trace__symbols__exit(trace); 4473 evlist__free_syscall_tp_fields(evlist); 4474 evlist__delete(evlist); 4475 cgroup__put(trace->cgroup); 4476 trace->evlist = NULL; 4477 trace->live = false; 4478 return err; 4479 { 4480 char errbuf[BUFSIZ]; 4481 4482 out_error_sched_stat_runtime: 4483 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime"); 4484 goto out_error; 4485 4486 out_error_raw_syscalls: 4487 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)"); 4488 goto out_error; 4489 4490 out_error_mmap: 4491 evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf)); 4492 goto out_error; 4493 4494 out_error_open: 4495 evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf)); 4496 4497 out_error: 4498 fprintf(trace->output, "%s\n", errbuf); 4499 goto out_delete_evlist; 4500 4501 out_error_apply_filters: 4502 fprintf(trace->output, 4503 "Failed to set filter \"%s\" on event %s with %d (%s)\n", 4504 evsel->filter, evsel__name(evsel), errno, 4505 str_error_r(errno, errbuf, sizeof(errbuf))); 4506 goto out_delete_evlist; 4507 } 4508 out_error_mem: 4509 fprintf(trace->output, "Not enough memory to run!\n"); 4510 goto out_delete_evlist; 4511 4512 out_errno: 4513 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno)); 4514 goto out_delete_evlist; 4515 } 4516 4517 static int trace__replay(struct trace *trace) 4518 { 4519 const struct evsel_str_handler handlers[] = { 4520 { "probe:vfs_getname", trace__vfs_getname, }, 4521 }; 4522 struct perf_data data = { 4523 .path = input_name, 4524 .mode = PERF_DATA_MODE_READ, 4525 .force = trace->force, 4526 }; 4527 struct perf_session *session; 4528 struct evsel *evsel; 4529 int err = -1; 4530 
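/*
 * Replay mode (perf trace -i perf.data): wire up the perf_session
 * callbacks that will process the previously recorded events.
 */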
trace->tool.sample = trace__process_sample; 4532 trace->tool.mmap = perf_event__process_mmap; 4533 trace->tool.mmap2 = perf_event__process_mmap2; 4534 trace->tool.comm = perf_event__process_comm; 4535 trace->tool.exit = perf_event__process_exit; 4536 trace->tool.fork = perf_event__process_fork; 4537 trace->tool.attr = perf_event__process_attr; 4538 trace->tool.tracing_data = perf_event__process_tracing_data; 4539 trace->tool.build_id = perf_event__process_build_id; 4540 trace->tool.namespaces = perf_event__process_namespaces; 4541 4542 trace->tool.ordered_events = true; 4543 trace->tool.ordering_requires_timestamps = true; 4544 4545 /* add tid to output */ 4546 trace->multiple_threads = true; 4547 4548 session = perf_session__new(&data, &trace->tool); 4549 if (IS_ERR(session)) 4550 return PTR_ERR(session); 4551 4552 if (trace->opts.target.pid) 4553 symbol_conf.pid_list_str = strdup(trace->opts.target.pid); 4554 4555 if (trace->opts.target.tid) 4556 symbol_conf.tid_list_str = strdup(trace->opts.target.tid); 4557 4558 if (symbol__init(&session->header.env) < 0) 4559 goto out; 4560 4561 trace->host = &session->machines.host; 4562 4563 err = perf_session__set_tracepoints_handlers(session, handlers); 4564 if (err) 4565 goto out; 4566 4567 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter"); 4568 trace->syscalls.events.sys_enter = evsel; 4569 /* older kernels have syscalls tp versus raw_syscalls */ 4570 if (evsel == NULL) 4571 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter"); 4572 4573 if (evsel && 4574 (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 || 4575 perf_evsel__init_sc_tp_ptr_field(evsel, args))) { 4576 pr_err("Error initializing raw_syscalls:sys_enter event\n"); 4577 goto out; 4578 } 4579 4580 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit"); 4581 trace->syscalls.events.sys_exit = evsel; 4582 if (evsel == NULL) 4583 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit"); 4584 if (evsel && 4585 (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 || 4586 perf_evsel__init_sc_tp_uint_field(evsel, ret))) { 4587 pr_err("Error initializing raw_syscalls:sys_exit event\n"); 4588 goto out; 4589 } 4590 4591 evlist__for_each_entry(session->evlist, evsel) { 4592 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE && 4593 (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ || 4594 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN || 4595 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS)) 4596 evsel->handler = trace__pgfault; 4597 } 4598 4599 setup_pager(); 4600 4601 err = perf_session__process_events(session); 4602 if (err) 4603 pr_err("Failed to process events, error %d\n", err); 4604 4605 else if (trace->summary) 4606 trace__fprintf_thread_summary(trace, trace->output); 4607 4608 out: 4609 perf_session__delete(session); 4610 4611 return err; 4612 } 4613 4614 static size_t trace__fprintf_threads_header(FILE *fp) 4615 { 4616 size_t printed; 4617 4618 printed = fprintf(fp, "\n Summary of events:\n\n"); 4619 4620 return printed; 4621 } 4622 4623 DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs, 4624 struct syscall_stats *stats; 4625 double msecs; 4626 int syscall; 4627 ) 4628 { 4629 struct int_node *source = rb_entry(nd, struct int_node, rb_node); 4630 struct syscall_stats *stats = source->priv; 4631 4632 entry->syscall = source->i; 4633 entry->stats = stats; 4634 entry->msecs = stats ?
(u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0; 4635 } 4636 4637 static size_t thread__dump_stats(struct thread_trace *ttrace, 4638 struct trace *trace, FILE *fp) 4639 { 4640 size_t printed = 0; 4641 struct syscall *sc; 4642 struct rb_node *nd; 4643 DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats); 4644 4645 if (syscall_stats == NULL) 4646 return 0; 4647 4648 printed += fprintf(fp, "\n"); 4649 4650 printed += fprintf(fp, " syscall calls errors total min avg max stddev\n"); 4651 printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n"); 4652 printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n"); 4653 4654 resort_rb__for_each_entry(nd, syscall_stats) { 4655 struct syscall_stats *stats = syscall_stats_entry->stats; 4656 if (stats) { 4657 double min = (double)(stats->stats.min) / NSEC_PER_MSEC; 4658 double max = (double)(stats->stats.max) / NSEC_PER_MSEC; 4659 double avg = avg_stats(&stats->stats); 4660 double pct; 4661 u64 n = (u64)stats->stats.n; 4662 4663 pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0; 4664 avg /= NSEC_PER_MSEC; 4665 4666 sc = &trace->syscalls.table[syscall_stats_entry->syscall]; 4667 printed += fprintf(fp, " %-15s", sc->name); 4668 printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f", 4669 n, stats->nr_failures, syscall_stats_entry->msecs, min, avg); 4670 printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct); 4671 4672 if (trace->errno_summary && stats->nr_failures) { 4673 int e; 4674 4675 for (e = 0; e < stats->max_errno; ++e) { 4676 if (stats->errnos[e] != 0) 4677 fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]); 4678 } 4679 } 4680 } 4681 } 4682 4683 resort_rb__delete(syscall_stats); 4684 printed += fprintf(fp, "\n\n"); 4685 4686 return printed; 4687 } 4688 4689 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace) 4690 { 4691 size_t printed = 0; 4692 struct thread_trace *ttrace = thread__priv(thread); 4693 double ratio; 4694 4695 if (ttrace == NULL) 4696 return 0; 4697 4698 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0; 4699 4700 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread__tid(thread)); 4701 printed += fprintf(fp, "%lu events, ", ttrace->nr_events); 4702 printed += fprintf(fp, "%.1f%%", ratio); 4703 if (ttrace->pfmaj) 4704 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj); 4705 if (ttrace->pfmin) 4706 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin); 4707 if (trace->sched) 4708 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms); 4709 else if (fputc('\n', fp) != EOF) 4710 ++printed; 4711 4712 printed += thread__dump_stats(ttrace, trace, fp); 4713 4714 return printed; 4715 } 4716 4717 static unsigned long thread__nr_events(struct thread_trace *ttrace) 4718 { 4719 return ttrace ? ttrace->nr_events : 0; 4720 } 4721 4722 static int trace_nr_events_cmp(void *priv __maybe_unused, 4723 const struct list_head *la, 4724 const struct list_head *lb) 4725 { 4726 struct thread_list *a = list_entry(la, struct thread_list, list); 4727 struct thread_list *b = list_entry(lb, struct thread_list, list); 4728 unsigned long a_nr_events = thread__nr_events(thread__priv(a->thread)); 4729 unsigned long b_nr_events = thread__nr_events(thread__priv(b->thread)); 4730 4731 if (a_nr_events != b_nr_events) 4732 return a_nr_events < b_nr_events ? -1 : 1; 4733 4734 /* Identical number of events, place smaller tids first.
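 * As list_sort() with this comparator sorts ascending, the summary
 * below ends up printing the busiest threads last.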
*/ 4735 return thread__tid(a->thread) < thread__tid(b->thread) 4736 ? -1 4737 : (thread__tid(a->thread) > thread__tid(b->thread) ? 1 : 0); 4738 } 4739 4740 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp) 4741 { 4742 size_t printed = trace__fprintf_threads_header(fp); 4743 LIST_HEAD(threads); 4744 4745 if (machine__thread_list(trace->host, &threads) == 0) { 4746 struct thread_list *pos; 4747 4748 list_sort(NULL, &threads, trace_nr_events_cmp); 4749 4750 list_for_each_entry(pos, &threads, list) 4751 printed += trace__fprintf_thread(fp, pos->thread, trace); 4752 } 4753 thread_list__delete(&threads); 4754 return printed; 4755 } 4756 4757 static int trace__set_duration(const struct option *opt, const char *str, 4758 int unset __maybe_unused) 4759 { 4760 struct trace *trace = opt->value; 4761 4762 trace->duration_filter = atof(str); 4763 return 0; 4764 } 4765 4766 static int trace__set_filter_pids_from_option(const struct option *opt, const char *str, 4767 int unset __maybe_unused) 4768 { 4769 int ret = -1; 4770 size_t i; 4771 struct trace *trace = opt->value; 4772 /* 4773 * FIXME: introduce an intarray class, plainly parse the csv and create a 4774 * { int nr, int entries[] } struct... 4775 */ 4776 struct intlist *list = intlist__new(str); 4777 4778 if (list == NULL) 4779 return -1; 4780 4781 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1; 4782 trace->filter_pids.entries = calloc(i, sizeof(pid_t)); 4783 4784 if (trace->filter_pids.entries == NULL) 4785 goto out; 4786 4787 trace->filter_pids.entries[0] = getpid(); 4788 4789 for (i = 1; i < trace->filter_pids.nr; ++i) 4790 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i; 4791 4792 intlist__delete(list); 4793 ret = 0; 4794 out: 4795 return ret; 4796 } 4797 4798 static int trace__open_output(struct trace *trace, const char *filename) 4799 { 4800 struct stat st; 4801 4802 if (!stat(filename, &st) && st.st_size) { 4803 char oldname[PATH_MAX]; 4804 4805 scnprintf(oldname, sizeof(oldname), "%s.old", filename); 4806 unlink(oldname); 4807 rename(filename, oldname); 4808 } 4809 4810 trace->output = fopen(filename, "w"); 4811 4812 return trace->output == NULL ?
-errno : 0; 4813 } 4814 4815 static int parse_pagefaults(const struct option *opt, const char *str, 4816 int unset __maybe_unused) 4817 { 4818 int *trace_pgfaults = opt->value; 4819 4820 if (strcmp(str, "all") == 0) 4821 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN; 4822 else if (strcmp(str, "maj") == 0) 4823 *trace_pgfaults |= TRACE_PFMAJ; 4824 else if (strcmp(str, "min") == 0) 4825 *trace_pgfaults |= TRACE_PFMIN; 4826 else 4827 return -1; 4828 4829 return 0; 4830 } 4831 4832 static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler) 4833 { 4834 struct evsel *evsel; 4835 4836 evlist__for_each_entry(evlist, evsel) { 4837 if (evsel->handler == NULL) 4838 evsel->handler = handler; 4839 } 4840 } 4841 4842 static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name) 4843 { 4844 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel); 4845 4846 if (fmt) { 4847 const struct syscall_fmt *scfmt = syscall_fmt__find(name); 4848 4849 if (scfmt) { 4850 const struct tep_event *tp_format = evsel__tp_format(evsel); 4851 4852 if (tp_format) { 4853 int skip = 0; 4854 4855 if (strcmp(tp_format->format.fields->name, "__syscall_nr") == 0 || 4856 strcmp(tp_format->format.fields->name, "nr") == 0) 4857 ++skip; 4858 4859 memcpy(fmt + skip, scfmt->arg, 4860 (tp_format->format.nr_fields - skip) * sizeof(*fmt)); 4861 } 4862 } 4863 } 4864 } 4865 4866 static int evlist__set_syscall_tp_fields(struct evlist *evlist, bool *use_btf) 4867 { 4868 struct evsel *evsel; 4869 4870 evlist__for_each_entry(evlist, evsel) { 4871 const struct tep_event *tp_format; 4872 4873 if (evsel->priv) 4874 continue; 4875 4876 tp_format = evsel__tp_format(evsel); 4877 if (!tp_format) 4878 continue; 4879 4880 if (strcmp(tp_format->system, "syscalls")) { 4881 evsel__init_tp_arg_scnprintf(evsel, use_btf); 4882 continue; 4883 } 4884 4885 if (evsel__init_syscall_tp(evsel)) 4886 return -1; 4887 4888 if (!strncmp(tp_format->name, "sys_enter_", 10)) { 4889 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 4890 4891 if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64))) 4892 return -1; 4893 4894 evsel__set_syscall_arg_fmt(evsel, 4895 tp_format->name + sizeof("sys_enter_") - 1); 4896 } else if (!strncmp(tp_format->name, "sys_exit_", 9)) { 4897 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 4898 4899 if (__tp_field__init_uint(&sc->ret, sizeof(u64), 4900 sc->id.offset + sizeof(u64), 4901 evsel->needs_swap)) 4902 return -1; 4903 4904 evsel__set_syscall_arg_fmt(evsel, 4905 tp_format->name + sizeof("sys_exit_") - 1); 4906 } 4907 } 4908 4909 return 0; 4910 } 4911 4912 /* 4913 * XXX: Hackish, just splitting the combined -e+--event (syscalls 4914 * (raw_syscalls:sys_{enter,exit}) + events (tracepoints, HW, SW, etc.)) to use 4915 * existing facilities unchanged (trace->ev_qualifier + parse_options()). 4916 * 4917 * It'd be better to introduce a parse_options() variant that would return a 4918 * list with the terms it didn't match to an event...
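 *
 * E.g. "-e open,openat,sched:sched_switch" puts "open,openat" in the
 * syscall qualifier list and hands "sched:sched_switch" to
 * parse_events_option() at the end of this function.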
4919 */ 4920 static int trace__parse_events_option(const struct option *opt, const char *str, 4921 int unset __maybe_unused) 4922 { 4923 struct trace *trace = (struct trace *)opt->value; 4924 const char *s = str; 4925 char *sep = NULL, *lists[2] = { NULL, NULL, }; 4926 int len = strlen(str) + 1, err = -1, list, idx; 4927 char *strace_groups_dir = system_path(STRACE_GROUPS_DIR); 4928 char group_name[PATH_MAX]; 4929 const struct syscall_fmt *fmt; 4930 4931 if (strace_groups_dir == NULL) 4932 return -1; 4933 4934 if (*s == '!') { 4935 ++s; 4936 trace->not_ev_qualifier = true; 4937 } 4938 4939 while (1) { 4940 if ((sep = strchr(s, ',')) != NULL) 4941 *sep = '\0'; 4942 4943 list = 0; 4944 if (syscalltbl__id(trace->sctbl, s) >= 0 || 4945 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) { 4946 list = 1; 4947 goto do_concat; 4948 } 4949 4950 fmt = syscall_fmt__find_by_alias(s); 4951 if (fmt != NULL) { 4952 list = 1; 4953 s = fmt->name; 4954 } else { 4955 path__join(group_name, sizeof(group_name), strace_groups_dir, s); 4956 if (access(group_name, R_OK) == 0) 4957 list = 1; 4958 } 4959 do_concat: 4960 if (lists[list]) { 4961 sprintf(lists[list] + strlen(lists[list]), ",%s", s); 4962 } else { 4963 lists[list] = malloc(len); 4964 if (lists[list] == NULL) 4965 goto out; 4966 strcpy(lists[list], s); 4967 } 4968 4969 if (!sep) 4970 break; 4971 4972 *sep = ','; 4973 s = sep + 1; 4974 } 4975 4976 if (lists[1] != NULL) { 4977 struct strlist_config slist_config = { 4978 .dirname = strace_groups_dir, 4979 }; 4980 4981 trace->ev_qualifier = strlist__new(lists[1], &slist_config); 4982 if (trace->ev_qualifier == NULL) { 4983 fputs("Not enough memory to parse event qualifier", trace->output); 4984 goto out; 4985 } 4986 4987 if (trace__validate_ev_qualifier(trace)) 4988 goto out; 4989 trace->trace_syscalls = true; 4990 } 4991 4992 err = 0; 4993 4994 if (lists[0]) { 4995 struct parse_events_option_args parse_events_option_args = { 4996 .evlistp = &trace->evlist, 4997 }; 4998 struct option o = { 4999 .value = &parse_events_option_args, 5000 }; 5001 err = parse_events_option(&o, lists[0], 0); 5002 } 5003 out: 5004 free(strace_groups_dir); 5005 free(lists[0]); 5006 free(lists[1]); 5007 if (sep) 5008 *sep = ','; 5009 5010 return err; 5011 } 5012 5013 static int trace__parse_cgroups(const struct option *opt, const char *str, int unset) 5014 { 5015 struct trace *trace = opt->value; 5016 5017 if (!list_empty(&trace->evlist->core.entries)) { 5018 struct option o = { 5019 .value = &trace->evlist, 5020 }; 5021 return parse_cgroups(&o, str, unset); 5022 } 5023 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str); 5024 5025 return 0; 5026 } 5027 5028 static int trace__config(const char *var, const char *value, void *arg) 5029 { 5030 struct trace *trace = arg; 5031 int err = 0; 5032 5033 if (!strcmp(var, "trace.add_events")) { 5034 trace->perfconfig_events = strdup(value); 5035 if (trace->perfconfig_events == NULL) { 5036 pr_err("Not enough memory for %s\n", "trace.add_events"); 5037 return -1; 5038 } 5039 } else if (!strcmp(var, "trace.show_timestamp")) { 5040 trace->show_tstamp = perf_config_bool(var, value); 5041 } else if (!strcmp(var, "trace.show_duration")) { 5042 trace->show_duration = perf_config_bool(var, value); 5043 } else if (!strcmp(var, "trace.show_arg_names")) { 5044 trace->show_arg_names = perf_config_bool(var, value); 5045 if (!trace->show_arg_names) 5046 trace->show_zeros = true; 5047 } else if (!strcmp(var, "trace.show_zeros")) { 5048 bool new_show_zeros = perf_config_bool(var, value); 5049 
if (!trace->show_arg_names && !new_show_zeros) { 5050 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n"); 5051 goto out; 5052 } 5053 trace->show_zeros = new_show_zeros; 5054 } else if (!strcmp(var, "trace.show_prefix")) { 5055 trace->show_string_prefix = perf_config_bool(var, value); 5056 } else if (!strcmp(var, "trace.no_inherit")) { 5057 trace->opts.no_inherit = perf_config_bool(var, value); 5058 } else if (!strcmp(var, "trace.args_alignment")) { 5059 int args_alignment = 0; 5060 if (perf_config_int(&args_alignment, var, value) == 0) 5061 trace->args_alignment = args_alignment; 5062 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) { 5063 if (strcasecmp(value, "libtraceevent") == 0) 5064 trace->libtraceevent_print = true; 5065 else if (strcasecmp(value, "libbeauty") == 0) 5066 trace->libtraceevent_print = false; 5067 } 5068 out: 5069 return err; 5070 } 5071 5072 static void trace__exit(struct trace *trace) 5073 { 5074 int i; 5075 5076 strlist__delete(trace->ev_qualifier); 5077 zfree(&trace->ev_qualifier_ids.entries); 5078 if (trace->syscalls.table) { 5079 for (i = 0; i <= trace->sctbl->syscalls.max_id; i++) 5080 syscall__exit(&trace->syscalls.table[i]); 5081 zfree(&trace->syscalls.table); 5082 } 5083 syscalltbl__delete(trace->sctbl); 5084 zfree(&trace->perfconfig_events); 5085 } 5086 5087 #ifdef HAVE_BPF_SKEL 5088 static int bpf__setup_bpf_output(struct evlist *evlist) 5089 { 5090 int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/"); 5091 5092 if (err) 5093 pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n"); 5094 5095 return err; 5096 } 5097 #endif 5098 5099 int cmd_trace(int argc, const char **argv) 5100 { 5101 const char *trace_usage[] = { 5102 "perf trace [<options>] [<command>]", 5103 "perf trace [<options>] -- <command> [<options>]", 5104 "perf trace record [<options>] [<command>]", 5105 "perf trace record [<options>] -- <command> [<options>]", 5106 NULL 5107 }; 5108 struct trace trace = { 5109 .opts = { 5110 .target = { 5111 .uid = UINT_MAX, 5112 .uses_mmap = true, 5113 }, 5114 .user_freq = UINT_MAX, 5115 .user_interval = ULLONG_MAX, 5116 .no_buffering = true, 5117 .mmap_pages = UINT_MAX, 5118 }, 5119 .output = stderr, 5120 .show_comm = true, 5121 .show_tstamp = true, 5122 .show_duration = true, 5123 .show_arg_names = true, 5124 .args_alignment = 70, 5125 .trace_syscalls = false, 5126 .kernel_syscallchains = false, 5127 .max_stack = UINT_MAX, 5128 .max_events = ULONG_MAX, 5129 }; 5130 const char *output_name = NULL; 5131 const struct option trace_options[] = { 5132 OPT_CALLBACK('e', "event", &trace, "event", 5133 "event/syscall selector. 
use 'perf list' to list available events", 5134 trace__parse_events_option), 5135 OPT_CALLBACK(0, "filter", &trace.evlist, "filter", 5136 "event filter", parse_filter), 5137 OPT_BOOLEAN(0, "comm", &trace.show_comm, 5138 "show the thread COMM next to its id"), 5139 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"), 5140 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace", 5141 trace__parse_events_option), 5142 OPT_STRING('o', "output", &output_name, "file", "output file name"), 5143 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"), 5144 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid", 5145 "trace events on existing process id"), 5146 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid", 5147 "trace events on existing thread id"), 5148 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids", 5149 "pids to filter (by the kernel)", trace__set_filter_pids_from_option), 5150 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide, 5151 "system-wide collection from all CPUs"), 5152 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu", 5153 "list of cpus to monitor"), 5154 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit, 5155 "child tasks do not inherit counters"), 5156 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages", 5157 "number of mmap data pages", evlist__parse_mmap_pages), 5158 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user", 5159 "user to profile"), 5160 OPT_CALLBACK(0, "duration", &trace, "float", 5161 "show only events with duration > N.M ms", 5162 trace__set_duration), 5163 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"), 5164 OPT_INCR('v', "verbose", &verbose, "be more verbose"), 5165 OPT_BOOLEAN('T', "time", &trace.full_time, 5166 "Show full timestamp, not time relative to first start"), 5167 OPT_BOOLEAN(0, "failure", &trace.failure_only, 5168 "Show only syscalls that failed"), 5169 OPT_BOOLEAN('s', "summary", &trace.summary_only, 5170 "Show only syscall summary with statistics"), 5171 OPT_BOOLEAN('S', "with-summary", &trace.summary, 5172 "Show all syscalls and summary with statistics"), 5173 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary, 5174 "Show errno stats per syscall, use with -s or -S"), 5175 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min", 5176 "Trace pagefaults", parse_pagefaults, "maj"), 5177 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"), 5178 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"), 5179 OPT_CALLBACK(0, "call-graph", &trace.opts, 5180 "record_mode[,record_size]", record_callchain_help, 5181 &record_parse_callchain_opt), 5182 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print, 5183 "Use libtraceevent to print the tracepoint arguments."), 5184 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains, 5185 "Show the kernel callchains on the syscall exit path"), 5186 OPT_ULONG(0, "max-events", &trace.max_events, 5187 "Set the maximum number of events to print, exit after that is reached. "), 5188 OPT_UINTEGER(0, "min-stack", &trace.min_stack, 5189 "Set the minimum stack depth when parsing the callchain, " 5190 "anything below the specified depth will be ignored."), 5191 OPT_UINTEGER(0, "max-stack", &trace.max_stack, 5192 "Set the maximum stack depth when parsing the callchain, " 5193 "anything beyond the specified depth will be ignored. 
" 5194 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)), 5195 OPT_BOOLEAN(0, "sort-events", &trace.sort_events, 5196 "Sort batch of events before processing, use if getting out of order events"), 5197 OPT_BOOLEAN(0, "print-sample", &trace.print_sample, 5198 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"), 5199 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout, 5200 "per thread proc mmap processing timeout in ms"), 5201 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only", 5202 trace__parse_cgroups), 5203 OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay, 5204 "ms to wait before starting measurement after program " 5205 "start"), 5206 OPT_BOOLEAN(0, "force-btf", &trace.force_btf, "Prefer btf_dump general pretty printer" 5207 "to customized ones"), 5208 OPTS_EVSWITCH(&trace.evswitch), 5209 OPT_END() 5210 }; 5211 bool __maybe_unused max_stack_user_set = true; 5212 bool mmap_pages_user_set = true; 5213 struct evsel *evsel; 5214 const char * const trace_subcommands[] = { "record", NULL }; 5215 int err = -1; 5216 char bf[BUFSIZ]; 5217 struct sigaction sigchld_act; 5218 5219 signal(SIGSEGV, sighandler_dump_stack); 5220 signal(SIGFPE, sighandler_dump_stack); 5221 signal(SIGINT, sighandler_interrupt); 5222 5223 memset(&sigchld_act, 0, sizeof(sigchld_act)); 5224 sigchld_act.sa_flags = SA_SIGINFO; 5225 sigchld_act.sa_sigaction = sighandler_chld; 5226 sigaction(SIGCHLD, &sigchld_act, NULL); 5227 5228 trace.evlist = evlist__new(); 5229 trace.sctbl = syscalltbl__new(); 5230 5231 if (trace.evlist == NULL || trace.sctbl == NULL) { 5232 pr_err("Not enough memory to run!\n"); 5233 err = -ENOMEM; 5234 goto out; 5235 } 5236 5237 /* 5238 * Parsing .perfconfig may entail creating a BPF event, that may need 5239 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting 5240 * is too small. This affects just this process, not touching the 5241 * global setting. If it fails we'll get something in 'perf trace -v' 5242 * to help diagnose the problem. 5243 */ 5244 rlimit__bump_memlock(); 5245 5246 err = perf_config(trace__config, &trace); 5247 if (err) 5248 goto out; 5249 5250 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands, 5251 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION); 5252 5253 /* 5254 * Here we already passed thru trace__parse_events_option() and it has 5255 * already figured out if -e syscall_name, if not but if --event 5256 * foo:bar was used, the user is interested _just_ in those, say, 5257 * tracepoint events, not in the strace-like syscall-name-based mode. 5258 * 5259 * This is important because we need to check if strace-like mode is 5260 * needed to decided if we should filter out the eBPF 5261 * __augmented_syscalls__ code, if it is in the mix, say, via 5262 * .perfconfig trace.add_events, and filter those out. 5263 */ 5264 if (!trace.trace_syscalls && !trace.trace_pgfaults && 5265 trace.evlist->core.nr_entries == 0 /* Was --events used? */) { 5266 trace.trace_syscalls = true; 5267 } 5268 /* 5269 * Now that we have --verbose figured out, lets see if we need to parse 5270 * events from .perfconfig, so that if those events fail parsing, say some 5271 * BPF program fails, then we'll be able to use --verbose to see what went 5272 * wrong in more detail. 
5273 */ 5274 if (trace.perfconfig_events != NULL) { 5275 struct parse_events_error parse_err; 5276 5277 parse_events_error__init(&parse_err); 5278 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err); 5279 if (err) 5280 parse_events_error__print(&parse_err, trace.perfconfig_events); 5281 parse_events_error__exit(&parse_err); 5282 if (err) 5283 goto out; 5284 } 5285 5286 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) { 5287 usage_with_options_msg(trace_usage, trace_options, 5288 "cgroup monitoring only available in system-wide mode"); 5289 } 5290 5291 #ifdef HAVE_BPF_SKEL 5292 if (!trace.trace_syscalls) 5293 goto skip_augmentation; 5294 5295 if ((argc >= 1) && (strcmp(argv[0], "record") == 0)) { 5296 pr_debug("Syscall augmentation fails with record, disabling augmentation\n"); 5297 goto skip_augmentation; 5298 } 5299 5300 trace.skel = augmented_raw_syscalls_bpf__open(); 5301 if (!trace.skel) { 5302 pr_debug("Failed to open augmented syscalls BPF skeleton\n"); 5303 } else { 5304 /* 5305 * Disable attaching the BPF programs except for sys_enter and 5306 * sys_exit, which tail call into the others as necessary. 5307 */ 5308 struct bpf_program *prog; 5309 5310 bpf_object__for_each_program(prog, trace.skel->obj) { 5311 if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit) 5312 bpf_program__set_autoattach(prog, /*autoattach=*/false); 5313 } 5314 5315 err = augmented_raw_syscalls_bpf__load(trace.skel); 5316 5317 if (err < 0) { 5318 libbpf_strerror(err, bf, sizeof(bf)); 5319 pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf); 5320 } else { 5321 augmented_raw_syscalls_bpf__attach(trace.skel); 5322 trace__add_syscall_newtp(&trace); 5323 } 5324 } 5325 5326 err = bpf__setup_bpf_output(trace.evlist); 5327 if (err) { 5328 libbpf_strerror(err, bf, sizeof(bf)); 5329 pr_err("ERROR: Setup BPF output event failed: %s\n", bf); 5330 goto out; 5331 } 5332 trace.syscalls.events.bpf_output = evlist__last(trace.evlist); 5333 assert(evsel__name_is(trace.syscalls.events.bpf_output, "__augmented_syscalls__")); 5334 skip_augmentation: 5335 #endif 5336 err = -1; 5337 5338 if (trace.trace_pgfaults) { 5339 trace.opts.sample_address = true; 5340 trace.opts.sample_time = true; 5341 } 5342 5343 if (trace.opts.mmap_pages == UINT_MAX) 5344 mmap_pages_user_set = false; 5345 5346 if (trace.max_stack == UINT_MAX) { 5347 trace.max_stack = input_name ?
PERF_MAX_STACK_DEPTH : sysctl__max_stack(); 5348 max_stack_user_set = false; 5349 } 5350 5351 #ifdef HAVE_DWARF_UNWIND_SUPPORT 5352 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) { 5353 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false); 5354 } 5355 #endif 5356 5357 if (callchain_param.enabled) { 5358 if (!mmap_pages_user_set && geteuid() == 0) 5359 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4; 5360 5361 symbol_conf.use_callchain = true; 5362 } 5363 5364 if (trace.evlist->core.nr_entries > 0) { 5365 bool use_btf = false; 5366 5367 evlist__set_default_evsel_handler(trace.evlist, trace__event_handler); 5368 if (evlist__set_syscall_tp_fields(trace.evlist, &use_btf)) { 5369 perror("failed to set syscalls:* tracepoint fields"); 5370 goto out; 5371 } 5372 5373 if (use_btf) 5374 trace__load_vmlinux_btf(&trace); 5375 } 5376 5377 if (trace.sort_events) { 5378 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace); 5379 ordered_events__set_copy_on_queue(&trace.oe.data, true); 5380 } 5381 5382 /* 5383 * If we are augmenting syscalls, then combine what we put in the 5384 * __augmented_syscalls__ BPF map with what is in the 5385 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF, 5386 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit. 5387 * 5388 * We'll switch to look at two BPF maps, one for sys_enter and the 5389 * other for sys_exit when we start augmenting the sys_exit paths with 5390 * buffers that are being copied from kernel to userspace, think 'read' 5391 * syscall. 5392 */ 5393 if (trace.syscalls.events.bpf_output) { 5394 evlist__for_each_entry(trace.evlist, evsel) { 5395 bool raw_syscalls_sys_exit = evsel__name_is(evsel, "raw_syscalls:sys_exit"); 5396 5397 if (raw_syscalls_sys_exit) { 5398 trace.raw_augmented_syscalls = true; 5399 goto init_augmented_syscall_tp; 5400 } 5401 5402 if (trace.syscalls.events.bpf_output->priv == NULL && 5403 strstr(evsel__name(evsel), "syscalls:sys_enter")) { 5404 struct evsel *augmented = trace.syscalls.events.bpf_output; 5405 if (evsel__init_augmented_syscall_tp(augmented, evsel) || 5406 evsel__init_augmented_syscall_tp_args(augmented)) 5407 goto out; 5408 /* 5409 * Augmented is __augmented_syscalls__ BPF_OUTPUT event 5410 * Above we made sure we can get from the payload the tp fields 5411 * that we get from syscalls:sys_enter tracefs format file. 5412 */ 5413 augmented->handler = trace__sys_enter; 5414 /* 5415 * Now we do the same for the *syscalls:sys_enter event so that 5416 * if we handle it directly, i.e. if the BPF prog returns 0 so 5417 * as not to filter it, then we'll handle it just like we would 5418 * for the BPF_OUTPUT one: 5419 */ 5420 if (evsel__init_augmented_syscall_tp(evsel, evsel) || 5421 evsel__init_augmented_syscall_tp_args(evsel)) 5422 goto out; 5423 evsel->handler = trace__sys_enter; 5424 } 5425 5426 if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) { 5427 struct syscall_tp *sc; 5428 init_augmented_syscall_tp: 5429 if (evsel__init_augmented_syscall_tp(evsel, evsel)) 5430 goto out; 5431 sc = __evsel__syscall_tp(evsel); 5432 /* 5433 * For now with BPF raw_augmented we hook into 5434 * raw_syscalls:sys_enter and there we get all 5435 * 6 syscall args plus the tracepoint common 5436 * fields and the syscall_nr (another long). 5437 * So we check if that is the case and if so 5438 * don't look after the sc->args_size but 5439 * always after the full raw_syscalls:sys_enter 5440 * payload, which is fixed. 
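 * (A worked example for the common LP64 case: (6 + 1) * sizeof(long) +
 * sc->id.offset = 56 bytes plus the offset of the "id" field, which is
 * what raw_augmented_syscalls_args_size is set to below.)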
5441 * 5442 * We'll revisit this later to pass 5443 * s->args_size to the BPF augmenter (now 5444 * tools/perf/examples/bpf/augmented_raw_syscalls.c, 5445 * so that it copies only what we need for each 5446 * syscall, like what happens when we use 5447 * syscalls:sys_enter_NAME, so that we reduce 5448 * the kernel/userspace traffic to just what is 5449 * needed for each syscall. 5450 */ 5451 if (trace.raw_augmented_syscalls) 5452 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset; 5453 evsel__init_augmented_syscall_tp_ret(evsel); 5454 evsel->handler = trace__sys_exit; 5455 } 5456 } 5457 } 5458 5459 if ((argc >= 1) && (strcmp(argv[0], "record") == 0)) 5460 return trace__record(&trace, argc-1, &argv[1]); 5461 5462 /* Using just --errno-summary will trigger --summary */ 5463 if (trace.errno_summary && !trace.summary && !trace.summary_only) 5464 trace.summary_only = true; 5465 5466 /* summary_only implies summary option, but don't overwrite summary if set */ 5467 if (trace.summary_only) 5468 trace.summary = trace.summary_only; 5469 5470 /* Keep exited threads, otherwise information might be lost for summary */ 5471 if (trace.summary) 5472 symbol_conf.keep_exited_threads = true; 5473 5474 if (output_name != NULL) { 5475 err = trace__open_output(&trace, output_name); 5476 if (err < 0) { 5477 perror("failed to create output file"); 5478 goto out; 5479 } 5480 } 5481 5482 err = evswitch__init(&trace.evswitch, trace.evlist, stderr); 5483 if (err) 5484 goto out_close; 5485 5486 err = target__validate(&trace.opts.target); 5487 if (err) { 5488 target__strerror(&trace.opts.target, err, bf, sizeof(bf)); 5489 fprintf(trace.output, "%s", bf); 5490 goto out_close; 5491 } 5492 5493 err = target__parse_uid(&trace.opts.target); 5494 if (err) { 5495 target__strerror(&trace.opts.target, err, bf, sizeof(bf)); 5496 fprintf(trace.output, "%s", bf); 5497 goto out_close; 5498 } 5499 5500 if (!argc && target__none(&trace.opts.target)) 5501 trace.opts.target.system_wide = true; 5502 5503 if (input_name) 5504 err = trace__replay(&trace); 5505 else 5506 err = trace__run(&trace, argc, argv); 5507 5508 out_close: 5509 if (output_name != NULL) 5510 fclose(trace.output); 5511 out: 5512 trace__exit(&trace); 5513 #ifdef HAVE_BPF_SKEL 5514 augmented_raw_syscalls_bpf__destroy(trace.skel); 5515 #endif 5516 return err; 5517 } 5518