#include <bpf/libbpf.h>
#include <internal/xyarray.h>

#include "util/debug.h"
#include "util/evlist.h"
#include "util/trace_augment.h"

#include "bpf_skel/augmented_raw_syscalls.skel.h"

static struct augmented_raw_syscalls_bpf *skel;
static struct evsel *bpf_output;

int augmented_syscalls__prepare(void)
{
	struct bpf_program *prog;
	char buf[128];
	int err;

	skel = augmented_raw_syscalls_bpf__open();
	if (!skel) {
		pr_debug("Failed to open augmented syscalls BPF skeleton\n");
		return -errno;
	}

	/*
	 * Disable attaching the BPF programs except for sys_enter and
	 * sys_exit that tail call into this as necessary.
	 */
	bpf_object__for_each_program(prog, skel->obj) {
		if (prog != skel->progs.sys_enter && prog != skel->progs.sys_exit)
			bpf_program__set_autoattach(prog, /*autoattach=*/false);
	}

	err = augmented_raw_syscalls_bpf__load(skel);
	if (err < 0) {
		libbpf_strerror(err, buf, sizeof(buf));
		pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", buf);
		return err;
	}

	augmented_raw_syscalls_bpf__attach(skel);
	return 0;
}

int augmented_syscalls__create_bpf_output(struct evlist *evlist)
{
	int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/");

	if (err) {
		pr_err("ERROR: Setup BPF output event failed: %d\n", err);
		return err;
	}

	bpf_output = evlist__last(evlist);
	assert(evsel__name_is(bpf_output, "__augmented_syscalls__"));

	return 0;
}

void augmented_syscalls__setup_bpf_output(void)
{
	struct perf_cpu cpu;
	int i;

	if (bpf_output == NULL)
		return;

	/*
	 * Set up the __augmented_syscalls__ BPF map to hold for each
	 * CPU the bpf-output event's file descriptor.
	 */
	perf_cpu_map__for_each_cpu(cpu, i, bpf_output->core.cpus) {
		int mycpu = cpu.cpu;

		bpf_map__update_elem(skel->maps.__augmented_syscalls__,
				     &mycpu, sizeof(mycpu),
				     xyarray__entry(bpf_output->core.fd,
						    mycpu, 0),
				     sizeof(__u32), BPF_ANY);
	}
}

int augmented_syscalls__set_filter_pids(unsigned int nr, pid_t *pids)
{
	bool value = true;
	int err = 0;

	if (skel == NULL)
		return 0;

	for (size_t i = 0; i < nr; ++i) {
		err = bpf_map__update_elem(skel->maps.pids_filtered, &pids[i],
					   sizeof(*pids), &value, sizeof(value),
					   BPF_ANY);
		if (err)
			break;
	}
	return err;
}

int augmented_syscalls__get_map_fds(int *enter_fd, int *exit_fd, int *beauty_fd)
{
	if (skel == NULL)
		return -1;

	*enter_fd = bpf_map__fd(skel->maps.syscalls_sys_enter);
	*exit_fd = bpf_map__fd(skel->maps.syscalls_sys_exit);
	*beauty_fd = bpf_map__fd(skel->maps.beauty_map_enter);

	if (*enter_fd < 0 || *exit_fd < 0 || *beauty_fd < 0) {
		pr_err("Error: failed to get syscall or beauty map fd\n");
		return -1;
	}

	return 0;
}

struct bpf_program *augmented_syscalls__unaugmented(void)
{
	return skel->progs.syscall_unaugmented;
}

struct bpf_program *augmented_syscalls__find_by_title(const char *name)
{
	struct bpf_program *pos;
	const char *sec_name;

	if (skel->obj == NULL)
		return NULL;

	bpf_object__for_each_program(pos, skel->obj) {
		sec_name = bpf_program__section_name(pos);
		if (sec_name && !strcmp(sec_name, name))
			return pos;
	}

	return NULL;
}

void augmented_syscalls__cleanup(void)
{
	augmented_raw_syscalls_bpf__destroy(skel);
}
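
/*
 * Illustrative sketch only, not part of the file above: a rough outline of
 * the call order a user such as builtin-trace.c might follow with these
 * helpers.  The function name, parameters, placement of evlist__open() and
 * the error handling here are assumptions made for this example, not perf's
 * actual code, so the block is kept out of the build with #if 0.
 */
#if 0
static int example_augmented_setup(struct evlist *evlist,
				   unsigned int nr_filter_pids, pid_t *filter_pids)
{
	int enter_fd, exit_fd, beauty_fd;
	int err;

	/* Open, load and attach the augmented_raw_syscalls skeleton. */
	err = augmented_syscalls__prepare();
	if (err)
		return err;

	/* Add the __augmented_syscalls__ bpf-output event to the evlist. */
	err = augmented_syscalls__create_bpf_output(evlist);
	if (err)
		return err;

	/*
	 * evlist__open() and the rest of the session setup would happen here;
	 * only after the evlist is open does the bpf-output event have one
	 * file descriptor per CPU that can be wired into the BPF map.
	 */
	augmented_syscalls__setup_bpf_output();

	/* Keep the tracer's own PIDs out of the trace. */
	err = augmented_syscalls__set_filter_pids(nr_filter_pids, filter_pids);
	if (err)
		return err;

	/* Fetch the sys_enter/sys_exit tail-call maps and the beauty map. */
	return augmented_syscalls__get_map_fds(&enter_fd, &exit_fd, &beauty_fd);
}
#endif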