// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "stacktrace_ips.skel.h"

/* The test relies on the ORC unwinder, which is available only on x86_64. */
#ifdef __x86_64__
/*
 * Compare the stack trace recorded in the stackmap under @key with the
 * @cnt expected symbol addresses passed as varargs.
 */
static int check_stacktrace_ips(int fd, __u32 key, int cnt, ...)
{
	__u64 ips[PERF_MAX_STACK_DEPTH];
	struct ksyms *ksyms = NULL;
	int i, err = 0;
	va_list args;

	/* sorted by addr */
	ksyms = load_kallsyms_local();
	if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local"))
		return -1;

	/* unlikely, but... */
	if (!ASSERT_LT(cnt, PERF_MAX_STACK_DEPTH, "check_max")) {
		err = -1;
		goto out;
	}

	err = bpf_map_lookup_elem(fd, &key, ips);
	if (err)
		goto out;

	/*
	 * Compare all symbols provided via arguments with stacktrace ips,
	 * and their related symbol addresses.
	 */
	va_start(args, cnt);

	for (i = 0; i < cnt; i++) {
		unsigned long val;
		struct ksym *ksym;

		val = va_arg(args, unsigned long);
		ksym = ksym_search_local(ksyms, ips[i]);
		if (!ASSERT_OK_PTR(ksym, "ksym_search_local"))
			break;
		ASSERT_EQ(ksym->addr, val, "stack_cmp");
	}

	va_end(args);

out:
	free_kallsyms_local(ksyms);
	return err;
}

static void test_stacktrace_ips_kprobe_multi(bool retprobe)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
		.retprobe = retprobe
	);
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct stacktrace_ips *skel;

	skel = stacktrace_ips__open_and_load();
	if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
		return;

	if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
		test__skip();
		goto cleanup;
	}

	skel->links.kprobe_multi_test = bpf_program__attach_kprobe_multi_opts(
			skel->progs.kprobe_multi_test,
			"bpf_testmod_stacktrace_test", &opts);
	if (!ASSERT_OK_PTR(skel->links.kprobe_multi_test, "bpf_program__attach_kprobe_multi_opts"))
		goto cleanup;

	trigger_module_test_read(1);

	load_kallsyms();

	if (retprobe) {
		check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
				     ksym_get_addr("bpf_testmod_stacktrace_test_3"),
				     ksym_get_addr("bpf_testmod_stacktrace_test_2"),
				     ksym_get_addr("bpf_testmod_stacktrace_test_1"),
				     ksym_get_addr("bpf_testmod_test_read"));
	} else {
		check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 5,
				     ksym_get_addr("bpf_testmod_stacktrace_test"),
				     ksym_get_addr("bpf_testmod_stacktrace_test_3"),
				     ksym_get_addr("bpf_testmod_stacktrace_test_2"),
				     ksym_get_addr("bpf_testmod_stacktrace_test_1"),
				     ksym_get_addr("bpf_testmod_test_read"));
	}

cleanup:
	stacktrace_ips__destroy(skel);
}

static void test_stacktrace_ips_raw_tp(void)
{
	__u32 info_len = sizeof(struct bpf_prog_info);
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct bpf_prog_info info = {};
	struct stacktrace_ips *skel;
	__u64 bpf_prog_ksym = 0;
	int err;

	skel = stacktrace_ips__open_and_load();
	if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
		return;

	if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
		test__skip();
		goto cleanup;
	}

	skel->links.rawtp_test = bpf_program__attach_raw_tracepoint(
			skel->progs.rawtp_test,
			"bpf_testmod_test_read");
	if (!ASSERT_OK_PTR(skel->links.rawtp_test, "bpf_program__attach_raw_tracepoint"))
		goto cleanup;

	/* get bpf program address */
	info.jited_ksyms = ptr_to_u64(&bpf_prog_ksym);
	info.nr_jited_ksyms = 1;
	err = bpf_prog_get_info_by_fd(bpf_program__fd(skel->progs.rawtp_test),
				      &info, &info_len);
	if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
		goto cleanup;

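	/*
	 * Trigger bpf_testmod_test_read() so the attached program records a
	 * stack trace; it is expected to start with the JITed program address
	 * followed by bpf_trace_run2(), the raw tracepoint dispatcher.
	 */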
	trigger_module_test_read(1);

	load_kallsyms();

	check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 2,
			     bpf_prog_ksym,
			     ksym_get_addr("bpf_trace_run2"));

cleanup:
	stacktrace_ips__destroy(skel);
}

static void test_stacktrace_ips_kprobe(bool retprobe)
{
	LIBBPF_OPTS(bpf_kprobe_opts, opts,
		.retprobe = retprobe
	);
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct stacktrace_ips *skel;

	skel = stacktrace_ips__open_and_load();
	if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
		return;

	if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
		test__skip();
		goto cleanup;
	}

	skel->links.kprobe_test = bpf_program__attach_kprobe_opts(
			skel->progs.kprobe_test,
			"bpf_testmod_stacktrace_test", &opts);
	if (!ASSERT_OK_PTR(skel->links.kprobe_test, "bpf_program__attach_kprobe_opts"))
		goto cleanup;

	trigger_module_test_read(1);

	load_kallsyms();

	if (retprobe) {
		check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
				     ksym_get_addr("bpf_testmod_stacktrace_test_3"),
				     ksym_get_addr("bpf_testmod_stacktrace_test_2"),
				     ksym_get_addr("bpf_testmod_stacktrace_test_1"),
				     ksym_get_addr("bpf_testmod_test_read"));
	} else {
		check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 5,
				     ksym_get_addr("bpf_testmod_stacktrace_test"),
				     ksym_get_addr("bpf_testmod_stacktrace_test_3"),
				     ksym_get_addr("bpf_testmod_stacktrace_test_2"),
				     ksym_get_addr("bpf_testmod_stacktrace_test_1"),
				     ksym_get_addr("bpf_testmod_test_read"));
	}

cleanup:
	stacktrace_ips__destroy(skel);
}

static void test_stacktrace_ips_trampoline(bool retprobe)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct stacktrace_ips *skel;

	skel = stacktrace_ips__open_and_load();
	if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
		return;

	if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
		test__skip();
		goto cleanup;
	}

	if (retprobe) {
		skel->links.fexit_test = bpf_program__attach_trace(skel->progs.fexit_test);
		if (!ASSERT_OK_PTR(skel->links.fexit_test, "bpf_program__attach_trace"))
			goto cleanup;
	} else {
		skel->links.fentry_test = bpf_program__attach_trace(skel->progs.fentry_test);
		if (!ASSERT_OK_PTR(skel->links.fentry_test, "bpf_program__attach_trace"))
			goto cleanup;
	}

	trigger_module_test_read(1);

	load_kallsyms();

	if (retprobe) {
		check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
				     ksym_get_addr("bpf_testmod_stacktrace_test_3"),
				     ksym_get_addr("bpf_testmod_stacktrace_test_2"),
				     ksym_get_addr("bpf_testmod_stacktrace_test_1"),
				     ksym_get_addr("bpf_testmod_test_read"));
	} else {
		check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 5,
				     ksym_get_addr("bpf_testmod_stacktrace_test"),
				     ksym_get_addr("bpf_testmod_stacktrace_test_3"),
				     ksym_get_addr("bpf_testmod_stacktrace_test_2"),
				     ksym_get_addr("bpf_testmod_stacktrace_test_1"),
				     ksym_get_addr("bpf_testmod_test_read"));
	}

cleanup:
	stacktrace_ips__destroy(skel);
}

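/*
 * Run each attach type as its own subtest; every variant verifies the same
 * call chain through the bpf_testmod stacktrace test helpers.
 */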
static void __test_stacktrace_ips(void)
{
	if (test__start_subtest("kprobe_multi"))
		test_stacktrace_ips_kprobe_multi(false);
	if (test__start_subtest("kretprobe_multi"))
		test_stacktrace_ips_kprobe_multi(true);
	if (test__start_subtest("raw_tp"))
		test_stacktrace_ips_raw_tp();
	if (test__start_subtest("kprobe"))
		test_stacktrace_ips_kprobe(false);
	if (test__start_subtest("kretprobe"))
		test_stacktrace_ips_kprobe(true);
	if (test__start_subtest("fentry"))
		test_stacktrace_ips_trampoline(false);
	if (test__start_subtest("fexit"))
		test_stacktrace_ips_trampoline(true);
}
#else
static void __test_stacktrace_ips(void)
{
	test__skip();
}
#endif

void test_stacktrace_ips(void)
{
	__test_stacktrace_ips();
}