// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <unistd.h>
#include <test_progs.h>
#include <network_helpers.h>
#include <bpf/btf.h>
#include "test_bpf_cookie.skel.h"
#include "kprobe_multi.skel.h"
#include "uprobe_multi.skel.h"

/* uprobe attach point */
static noinline void trigger_func(void)
{
	asm volatile ("");
}

static void kprobe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL;
	struct bpf_link *retlink1 = NULL, *retlink2 = NULL;

	/* attach two kprobes */
	opts.bpf_cookie = 0x1;
	opts.retprobe = false;
	link1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	opts.bpf_cookie = 0x2;
	opts.retprobe = false;
	link2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* attach two kretprobes */
	opts.bpf_cookie = 0x10;
	opts.retprobe = true;
	retlink1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
						   SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(retlink1, "retlink1"))
		goto cleanup;

	opts.bpf_cookie = 0x20;
	opts.retprobe = true;
	retlink2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
						   SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(retlink2, "retlink2"))
		goto cleanup;

	/* trigger kprobe && kretprobe */
	usleep(1);

	ASSERT_EQ(skel->bss->kprobe_res, 0x1 | 0x2, "kprobe_res");
	ASSERT_EQ(skel->bss->kretprobe_res, 0x10 | 0x20, "kretprobe_res");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(retlink1);
	bpf_link__destroy(retlink2);
}

static void kprobe_multi_test_run(struct kprobe_multi *skel)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	int err, prog_fd;

	prog_fd = bpf_program__fd(skel->progs.trigger);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run");
	ASSERT_EQ(topts.retval, 0, "test_run");

	ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result");
	ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result");
	ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result");
	ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result");
	ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result");
	ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result");
	ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result");
	ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result");

	ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result");
	ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result");
	ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result");
	ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result");
	ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result");
	ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result");
	ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result");
	ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result");
}

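/*
 * Attach one multi-kprobe and one multi-kretprobe link through the
 * low-level bpf_link_create() API: addresses are resolved from
 * /proc/kallsyms and each address is paired with its own cookie. On the
 * BPF side the cookie is read back with bpf_get_attach_cookie(); a
 * minimal sketch of such a program (the real programs live in
 * progs/kprobe_multi.c; the accumulator name below is made up purely
 * for illustration) would look roughly like:
 *
 *	SEC("kprobe.multi")
 *	int test_kprobe(struct pt_regs *ctx)
 *	{
 *		seen_cookies |= bpf_get_attach_cookie(ctx);
 *		return 0;
 *	}
 */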
static void kprobe_multi_link_api_subtest(void)
{
	int prog_fd, link1_fd = -1, link2_fd = -1;
	struct kprobe_multi *skel = NULL;
	LIBBPF_OPTS(bpf_link_create_opts, opts);
	unsigned long long addrs[8];
	__u64 cookies[8];

	if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
		goto cleanup;

	skel = kprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "kprobe_multi_skel_load"))
		goto cleanup;

	skel->bss->pid = getpid();
	skel->bss->test_cookie = true;

#define GET_ADDR(__sym, __addr) ({					\
	__addr = ksym_get_addr(__sym);					\
	if (!ASSERT_NEQ(__addr, 0, "ksym_get_addr " #__sym))		\
		goto cleanup;						\
})

	GET_ADDR("bpf_fentry_test1", addrs[0]);
	GET_ADDR("bpf_fentry_test3", addrs[1]);
	GET_ADDR("bpf_fentry_test4", addrs[2]);
	GET_ADDR("bpf_fentry_test5", addrs[3]);
	GET_ADDR("bpf_fentry_test6", addrs[4]);
	GET_ADDR("bpf_fentry_test7", addrs[5]);
	GET_ADDR("bpf_fentry_test2", addrs[6]);
	GET_ADDR("bpf_fentry_test8", addrs[7]);

#undef GET_ADDR

	cookies[0] = 1; /* bpf_fentry_test1 */
	cookies[1] = 2; /* bpf_fentry_test3 */
	cookies[2] = 3; /* bpf_fentry_test4 */
	cookies[3] = 4; /* bpf_fentry_test5 */
	cookies[4] = 5; /* bpf_fentry_test6 */
	cookies[5] = 6; /* bpf_fentry_test7 */
	cookies[6] = 7; /* bpf_fentry_test2 */
	cookies[7] = 8; /* bpf_fentry_test8 */

	opts.kprobe_multi.addrs = (const unsigned long *) &addrs;
	opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
	opts.kprobe_multi.cookies = (const __u64 *) &cookies;
	prog_fd = bpf_program__fd(skel->progs.test_kprobe);

	link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
	if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
		goto cleanup;

	cookies[0] = 8; /* bpf_fentry_test1 */
	cookies[1] = 7; /* bpf_fentry_test3 */
	cookies[2] = 6; /* bpf_fentry_test4 */
	cookies[3] = 5; /* bpf_fentry_test5 */
	cookies[4] = 4; /* bpf_fentry_test6 */
	cookies[5] = 3; /* bpf_fentry_test7 */
	cookies[6] = 2; /* bpf_fentry_test2 */
	cookies[7] = 1; /* bpf_fentry_test8 */

	opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
	prog_fd = bpf_program__fd(skel->progs.test_kretprobe);

	link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
	if (!ASSERT_GE(link2_fd, 0, "link2_fd"))
		goto cleanup;

	kprobe_multi_test_run(skel);

cleanup:
	close(link1_fd);
	close(link2_fd);
	kprobe_multi__destroy(skel);
}

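/*
 * Same cookie checks as the link API subtest above, but through the
 * high-level bpf_program__attach_kprobe_multi_opts() API, which takes
 * symbol names and resolves them to addresses internally.
 */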
static void kprobe_multi_attach_api_subtest(void)
{
	struct bpf_link *link1 = NULL, *link2 = NULL;
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct kprobe_multi *skel = NULL;
	const char *syms[8] = {
		"bpf_fentry_test1",
		"bpf_fentry_test3",
		"bpf_fentry_test4",
		"bpf_fentry_test5",
		"bpf_fentry_test6",
		"bpf_fentry_test7",
		"bpf_fentry_test2",
		"bpf_fentry_test8",
	};
	__u64 cookies[8];

	skel = kprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "kprobe_multi_skel_load"))
		goto cleanup;

	skel->bss->pid = getpid();
	skel->bss->test_cookie = true;

	cookies[0] = 1; /* bpf_fentry_test1 */
	cookies[1] = 2; /* bpf_fentry_test3 */
	cookies[2] = 3; /* bpf_fentry_test4 */
	cookies[3] = 4; /* bpf_fentry_test5 */
	cookies[4] = 5; /* bpf_fentry_test6 */
	cookies[5] = 6; /* bpf_fentry_test7 */
	cookies[6] = 7; /* bpf_fentry_test2 */
	cookies[7] = 8; /* bpf_fentry_test8 */

	opts.syms = syms;
	opts.cnt = ARRAY_SIZE(syms);
	opts.cookies = cookies;

	link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
						      NULL, &opts);
	if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
		goto cleanup;

	cookies[0] = 8; /* bpf_fentry_test1 */
	cookies[1] = 7; /* bpf_fentry_test3 */
	cookies[2] = 6; /* bpf_fentry_test4 */
	cookies[3] = 5; /* bpf_fentry_test5 */
	cookies[4] = 4; /* bpf_fentry_test6 */
	cookies[5] = 3; /* bpf_fentry_test7 */
	cookies[6] = 2; /* bpf_fentry_test2 */
	cookies[7] = 1; /* bpf_fentry_test8 */

	opts.retprobe = true;

	link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe,
						      NULL, &opts);
	if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts"))
		goto cleanup;

	kprobe_multi_test_run(skel);

cleanup:
	bpf_link__destroy(link2);
	bpf_link__destroy(link1);
	kprobe_multi__destroy(skel);
}

/* defined in prog_tests/uprobe_multi_test.c */
void uprobe_multi_func_1(void);
void uprobe_multi_func_2(void);
void uprobe_multi_func_3(void);

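/*
 * Run each traced user-space function once and verify that both the
 * entry and the return multi-uprobe programs observed the cookie that
 * was registered for that particular function.
 */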
static void uprobe_multi_test_run(struct uprobe_multi *skel)
{
	skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
	skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
	skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;

	skel->bss->pid = getpid();
	skel->bss->test_cookie = true;

	uprobe_multi_func_1();
	uprobe_multi_func_2();
	uprobe_multi_func_3();

	ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 1, "uprobe_multi_func_1_result");
	ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 1, "uprobe_multi_func_2_result");
	ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 1, "uprobe_multi_func_3_result");

	ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 1, "uretprobe_multi_func_1_result");
	ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 1, "uretprobe_multi_func_2_result");
	ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 1, "uretprobe_multi_func_3_result");
}

static void uprobe_multi_attach_api_subtest(void)
{
	struct bpf_link *link1 = NULL, *link2 = NULL;
	struct uprobe_multi *skel = NULL;
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
	const char *syms[3] = {
		"uprobe_multi_func_1",
		"uprobe_multi_func_2",
		"uprobe_multi_func_3",
	};
	__u64 cookies[3];

	cookies[0] = 3; /* uprobe_multi_func_1 */
	cookies[1] = 1; /* uprobe_multi_func_2 */
	cookies[2] = 2; /* uprobe_multi_func_3 */

	opts.syms = syms;
	opts.cnt = ARRAY_SIZE(syms);
	opts.cookies = &cookies[0];

	skel = uprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi"))
		goto cleanup;

	link1 = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1,
						 "/proc/self/exe", NULL, &opts);
	if (!ASSERT_OK_PTR(link1, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	cookies[0] = 2; /* uprobe_multi_func_1 */
	cookies[1] = 3; /* uprobe_multi_func_2 */
	cookies[2] = 1; /* uprobe_multi_func_3 */

	opts.retprobe = true;
	link2 = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, -1,
						 "/proc/self/exe", NULL, &opts);
	if (!ASSERT_OK_PTR(link2, "bpf_program__attach_uprobe_multi_retprobe"))
		goto cleanup;

	uprobe_multi_test_run(skel);

cleanup:
	bpf_link__destroy(link2);
	bpf_link__destroy(link1);
	uprobe_multi__destroy(skel);
}

static void uprobe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL;
	struct bpf_link *retlink1 = NULL, *retlink2 = NULL;
	ssize_t uprobe_offset;

	uprobe_offset = get_uprobe_offset(&trigger_func);
	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
		goto cleanup;

	/* attach two uprobes */
	opts.bpf_cookie = 0x100;
	opts.retprobe = false;
	link1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, 0 /* self pid */,
						"/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	opts.bpf_cookie = 0x200;
	opts.retprobe = false;
	link2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, -1 /* any pid */,
						"/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* attach two uretprobes */
	opts.bpf_cookie = 0x1000;
	opts.retprobe = true;
	retlink1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, -1 /* any pid */,
						   "/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(retlink1, "retlink1"))
		goto cleanup;

	opts.bpf_cookie = 0x2000;
	opts.retprobe = true;
	retlink2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, 0 /* self pid */,
						   "/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(retlink2, "retlink2"))
		goto cleanup;

	/* trigger uprobe && uretprobe */
	trigger_func();

	ASSERT_EQ(skel->bss->uprobe_res, 0x100 | 0x200, "uprobe_res");
	ASSERT_EQ(skel->bss->uretprobe_res, 0x1000 | 0x2000, "uretprobe_res");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(retlink1);
	bpf_link__destroy(retlink2);
}

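/*
 * Classic tracepoint cookies: two programs attached to the same
 * tracepoint, each with its own cookie; the BPF side ORs the cookies it
 * sees into tp_res, so both attachments must fire for the check to pass.
 */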
static void tp_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL, *link3 = NULL;

	/* attach first tp prog */
	opts.bpf_cookie = 0x10000;
	link1 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp1,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	/* attach second tp prog */
	opts.bpf_cookie = 0x20000;
	link2 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp2,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* trigger tracepoints */
	usleep(1);

	ASSERT_EQ(skel->bss->tp_res, 0x10000 | 0x20000, "tp_res1");

	/* now we detach first prog and will attach third one, which causes
	 * two internal calls to bpf_prog_array_copy(), shuffling
	 * bpf_prog_array_items around. We test here that we don't lose track
	 * of associated bpf_cookies.
	 */
	bpf_link__destroy(link1);
	link1 = NULL;
	kern_sync_rcu();
	skel->bss->tp_res = 0;

	/* attach third tp prog */
	opts.bpf_cookie = 0x40000;
	link3 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp3,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link3, "link3"))
		goto cleanup;

	/* trigger tracepoints */
	usleep(1);

	ASSERT_EQ(skel->bss->tp_res, 0x20000 | 0x40000, "tp_res2");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(link3);
}

static void burn_cpu(void)
{
	volatile int j = 0;
	cpu_set_t cpu_set;
	int i, err;

	/* generate some branches on cpu 0 */
	CPU_ZERO(&cpu_set);
	CPU_SET(0, &cpu_set);
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
	ASSERT_OK(err, "set_thread_affinity");

	/* spin the loop for a while (random high number) */
	for (i = 0; i < 1000000; ++i)
		++j;
}

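/*
 * Perf event cookies: the subtest below keeps the perf event FD alive
 * across detach/re-attach by calling bpf_link__disconnect() before
 * destroying the link, so bpf_link__destroy() only frees memory and
 * does not close pfd.
 */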
static void pe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts);
	struct bpf_link *link = NULL;
	struct perf_event_attr attr;
	int pfd = -1;

	/* create perf event */
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.sample_period = 100000;
	pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
	if (!ASSERT_GE(pfd, 0, "perf_fd"))
		goto cleanup;

	opts.bpf_cookie = 0x100000;
	link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts);
	if (!ASSERT_OK_PTR(link, "link1"))
		goto cleanup;

	burn_cpu(); /* trigger BPF prog */

	ASSERT_EQ(skel->bss->pe_res, 0x100000, "pe_res1");

	/* prevent bpf_link__destroy() closing pfd itself */
	bpf_link__disconnect(link);
	/* close BPF link's FD explicitly */
	close(bpf_link__fd(link));
	/* free up memory used by struct bpf_link */
	bpf_link__destroy(link);
	link = NULL;
	kern_sync_rcu();
	skel->bss->pe_res = 0;

	opts.bpf_cookie = 0x200000;
	link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts);
	if (!ASSERT_OK_PTR(link, "link2"))
		goto cleanup;

	burn_cpu(); /* trigger BPF prog */

	ASSERT_EQ(skel->bss->pe_res, 0x200000, "pe_res2");

cleanup:
	close(pfd);
	bpf_link__destroy(link);
}

static int verify_tracing_link_info(int fd, u64 cookie)
{
	struct bpf_link_info info;
	int err;
	u32 len = sizeof(info);

	memset(&info, 0, sizeof(info));
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (!ASSERT_OK(err, "get_link_info"))
		return -1;

	if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_TRACING, "link_type"))
		return -1;

	ASSERT_EQ(info.tracing.cookie, cookie, "tracing_cookie");

	return 0;
}

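/*
 * Trampoline-based links (fentry/fexit/fmod_ret) receive their cookie
 * through bpf_link_create() via link_opts.tracing.cookie; the same value
 * must then be reported back through bpf_link_get_info_by_fd(), which is
 * what verify_tracing_link_info() above checks.
 */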
static void tracing_subtest(struct test_bpf_cookie *skel)
{
	__u64 cookie;
	int prog_fd, err;
	int fentry_fd = -1, fexit_fd = -1, fmod_ret_fd = -1;
	LIBBPF_OPTS(bpf_test_run_opts, opts);
	LIBBPF_OPTS(bpf_link_create_opts, link_opts);

	skel->bss->fentry_res = 0;
	skel->bss->fexit_res = 0;

	cookie = 0x10000000000000L;
	prog_fd = bpf_program__fd(skel->progs.fentry_test1);
	link_opts.tracing.cookie = cookie;
	fentry_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, &link_opts);
	if (!ASSERT_GE(fentry_fd, 0, "fentry.link_create"))
		goto cleanup;

	err = verify_tracing_link_info(fentry_fd, cookie);
	if (!ASSERT_OK(err, "verify_tracing_link_info"))
		goto cleanup;

	cookie = 0x20000000000000L;
	prog_fd = bpf_program__fd(skel->progs.fexit_test1);
	link_opts.tracing.cookie = cookie;
	fexit_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FEXIT, &link_opts);
	if (!ASSERT_GE(fexit_fd, 0, "fexit.link_create"))
		goto cleanup;

	cookie = 0x30000000000000L;
	prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
	link_opts.tracing.cookie = cookie;
	fmod_ret_fd = bpf_link_create(prog_fd, 0, BPF_MODIFY_RETURN, &link_opts);
	if (!ASSERT_GE(fmod_ret_fd, 0, "fmod_ret.link_create"))
		goto cleanup;

	prog_fd = bpf_program__fd(skel->progs.fentry_test1);
	bpf_prog_test_run_opts(prog_fd, &opts);

	prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
	bpf_prog_test_run_opts(prog_fd, &opts);

	ASSERT_EQ(skel->bss->fentry_res, 0x10000000000000L, "fentry_res");
	ASSERT_EQ(skel->bss->fexit_res, 0x20000000000000L, "fexit_res");
	ASSERT_EQ(skel->bss->fmod_ret_res, 0x30000000000000L, "fmod_ret_res");

cleanup:
	if (fentry_fd >= 0)
		close(fentry_fd);
	if (fexit_fd >= 0)
		close(fexit_fd);
	if (fmod_ret_fd >= 0)
		close(fmod_ret_fd);
}

int stack_mprotect(void);

static void lsm_subtest(struct test_bpf_cookie *skel)
{
	__u64 cookie;
	int prog_fd;
	int lsm_fd = -1;
	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
	int err;

	skel->bss->lsm_res = 0;

	cookie = 0x90000000000090L;
	prog_fd = bpf_program__fd(skel->progs.test_int_hook);
	link_opts.tracing.cookie = cookie;
	lsm_fd = bpf_link_create(prog_fd, 0, BPF_LSM_MAC, &link_opts);
	if (!ASSERT_GE(lsm_fd, 0, "lsm.link_create"))
		goto cleanup;

	err = stack_mprotect();
	if (!ASSERT_EQ(err, -1, "stack_mprotect") ||
	    !ASSERT_EQ(errno, EPERM, "stack_mprotect"))
		goto cleanup;

	usleep(1);

	ASSERT_EQ(skel->bss->lsm_res, 0x90000000000090L, "lsm_res");

cleanup:
	if (lsm_fd >= 0)
		close(lsm_fd);
}

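/*
 * Each of the three tp_btf attach paths below carries the cookie in a
 * different options struct: raw_tp_opts.cookie for
 * bpf_raw_tracepoint_open_opts(), link_opts.tracing.cookie for
 * bpf_link_create(), and trace_opts.cookie for
 * bpf_program__attach_trace_opts().
 */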
static void tp_btf_subtest(struct test_bpf_cookie *skel)
{
	__u64 cookie;
	int prog_fd, link_fd = -1;
	struct bpf_link *link = NULL;
	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
	LIBBPF_OPTS(bpf_raw_tp_opts, raw_tp_opts);
	LIBBPF_OPTS(bpf_trace_opts, trace_opts);

	/* There are three different ways to attach tp_btf (BTF-aware raw
	 * tracepoint) programs. Let's test all of them.
	 */
	prog_fd = bpf_program__fd(skel->progs.handle_tp_btf);

	/* low-level BPF_RAW_TRACEPOINT_OPEN command wrapper */
	skel->bss->tp_btf_res = 0;

	raw_tp_opts.cookie = cookie = 0x11000000000000L;
	link_fd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_tp_opts);
	if (!ASSERT_GE(link_fd, 0, "bpf_raw_tracepoint_open_opts"))
		goto cleanup;

	usleep(1); /* trigger */
	close(link_fd); /* detach */
	link_fd = -1;

	ASSERT_EQ(skel->bss->tp_btf_res, cookie, "raw_tp_open_res");

	/* low-level generic bpf_link_create() API */
	skel->bss->tp_btf_res = 0;

	link_opts.tracing.cookie = cookie = 0x22000000000000L;
	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_RAW_TP, &link_opts);
	if (!ASSERT_GE(link_fd, 0, "bpf_link_create"))
		goto cleanup;

	usleep(1); /* trigger */
	close(link_fd); /* detach */
	link_fd = -1;

	ASSERT_EQ(skel->bss->tp_btf_res, cookie, "link_create_res");

	/* high-level bpf_link-based bpf_program__attach_trace_opts() API */
	skel->bss->tp_btf_res = 0;

	trace_opts.cookie = cookie = 0x33000000000000L;
	link = bpf_program__attach_trace_opts(skel->progs.handle_tp_btf, &trace_opts);
	if (!ASSERT_OK_PTR(link, "attach_trace_opts"))
		goto cleanup;

	usleep(1); /* trigger */
	bpf_link__destroy(link); /* detach */
	link = NULL;

	ASSERT_EQ(skel->bss->tp_btf_res, cookie, "attach_trace_opts_res");

cleanup:
	if (link_fd >= 0)
		close(link_fd);
	bpf_link__destroy(link);
}

static int verify_raw_tp_link_info(int fd, u64 cookie)
{
	struct bpf_link_info info;
	int err;
	u32 len = sizeof(info);

	memset(&info, 0, sizeof(info));
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (!ASSERT_OK(err, "get_link_info"))
		return -1;

	if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_RAW_TRACEPOINT, "link_type"))
		return -1;

	ASSERT_EQ(info.raw_tracepoint.cookie, cookie, "raw_tp_cookie");

	return 0;
}

static void raw_tp_subtest(struct test_bpf_cookie *skel)
{
	__u64 cookie;
	int err, prog_fd, link_fd = -1;
	struct bpf_link *link = NULL;
	LIBBPF_OPTS(bpf_raw_tp_opts, raw_tp_opts);
	LIBBPF_OPTS(bpf_raw_tracepoint_opts, opts);

	/* There are two different ways to attach raw_tp programs */
	prog_fd = bpf_program__fd(skel->progs.handle_raw_tp);

	/* low-level BPF_RAW_TRACEPOINT_OPEN command wrapper */
	skel->bss->raw_tp_res = 0;

	raw_tp_opts.tp_name = "sys_enter";
	raw_tp_opts.cookie = cookie = 0x55000000000000L;
	link_fd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_tp_opts);
	if (!ASSERT_GE(link_fd, 0, "bpf_raw_tracepoint_open_opts"))
		goto cleanup;

	usleep(1); /* trigger */

	err = verify_raw_tp_link_info(link_fd, cookie);
	if (!ASSERT_OK(err, "verify_raw_tp_link_info"))
		goto cleanup;

	close(link_fd); /* detach */
	link_fd = -1;

	ASSERT_EQ(skel->bss->raw_tp_res, cookie, "raw_tp_open_res");

	/* high-level bpf_link-based bpf_program__attach_raw_tracepoint_opts() API */
	skel->bss->raw_tp_res = 0;

	opts.cookie = cookie = 0x66000000000000L;
	link = bpf_program__attach_raw_tracepoint_opts(skel->progs.handle_raw_tp,
						       "sys_enter", &opts);
	if (!ASSERT_OK_PTR(link, "attach_raw_tp_opts"))
		goto cleanup;

	usleep(1); /* trigger */
	bpf_link__destroy(link); /* detach */
	link = NULL;

	ASSERT_EQ(skel->bss->raw_tp_res, cookie, "attach_raw_tp_opts_res");

cleanup:
	if (link_fd >= 0)
		close(link_fd);
	bpf_link__destroy(link);
}

void test_bpf_cookie(void)
{
	struct test_bpf_cookie *skel;

	skel = test_bpf_cookie__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	skel->bss->my_tid = sys_gettid();

	if (test__start_subtest("kprobe"))
		kprobe_subtest(skel);
	if (test__start_subtest("multi_kprobe_link_api"))
		kprobe_multi_link_api_subtest();
	if (test__start_subtest("multi_kprobe_attach_api"))
		kprobe_multi_attach_api_subtest();
	if (test__start_subtest("uprobe"))
		uprobe_subtest(skel);
	if (test__start_subtest("multi_uprobe_attach_api"))
		uprobe_multi_attach_api_subtest();
	if (test__start_subtest("tracepoint"))
		tp_subtest(skel);
	if (test__start_subtest("perf_event"))
		pe_subtest(skel);
	if (test__start_subtest("trampoline"))
		tracing_subtest(skel);
	if (test__start_subtest("lsm"))
		lsm_subtest(skel);
	if (test__start_subtest("tp_btf"))
		tp_btf_subtest(skel);
	if (test__start_subtest("raw_tp"))
		raw_tp_subtest(skel);

	test_bpf_cookie__destroy(skel);
}