// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <unistd.h>
#include <test_progs.h>
#include <network_helpers.h>
#include <bpf/btf.h>
#include "test_bpf_cookie.skel.h"
#include "kprobe_multi.skel.h"

/* uprobe attach point */
static noinline void trigger_func(void)
{
	asm volatile ("");
}

static void kprobe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL;
	struct bpf_link *retlink1 = NULL, *retlink2 = NULL;

	/* attach two kprobes */
	opts.bpf_cookie = 0x1;
	opts.retprobe = false;
	link1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	opts.bpf_cookie = 0x2;
	opts.retprobe = false;
	link2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* attach two kretprobes */
	opts.bpf_cookie = 0x10;
	opts.retprobe = true;
	retlink1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
						   SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(retlink1, "retlink1"))
		goto cleanup;

	opts.bpf_cookie = 0x20;
	opts.retprobe = true;
	retlink2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
						   SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(retlink2, "retlink2"))
		goto cleanup;

	/* trigger kprobe && kretprobe */
	usleep(1);

	ASSERT_EQ(skel->bss->kprobe_res, 0x1 | 0x2, "kprobe_res");
	ASSERT_EQ(skel->bss->kretprobe_res, 0x10 | 0x20, "kretprobe_res");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(retlink1);
	bpf_link__destroy(retlink2);
}
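
/*
 * For reference: on the BPF side, each handler reads its per-attachment
 * value with the bpf_get_attach_cookie() helper and ORs it into the
 * corresponding result variable, which is what the bitmask asserts above
 * rely on. A minimal sketch, assuming a global kprobe_res and ignoring the
 * TID filtering the real programs in progs/test_bpf_cookie.c perform:
 *
 *	SEC("kprobe")
 *	int handle_kprobe(struct pt_regs *ctx)
 *	{
 *		kprobe_res |= bpf_get_attach_cookie(ctx);
 *		return 0;
 *	}
 */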

static void kprobe_multi_test_run(struct kprobe_multi *skel)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	int err, prog_fd;

	prog_fd = bpf_program__fd(skel->progs.trigger);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run");
	ASSERT_EQ(topts.retval, 0, "test_run");

	ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result");
	ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result");
	ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result");
	ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result");
	ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result");
	ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result");
	ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result");
	ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result");

	ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result");
	ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result");
	ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result");
	ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result");
	ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result");
	ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result");
	ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result");
	ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result");
}

static void
kprobe_multi_link_api_subtest(void)
{
	int prog_fd, link1_fd = -1, link2_fd = -1;
	struct kprobe_multi *skel = NULL;
	LIBBPF_OPTS(bpf_link_create_opts, opts);
	unsigned long long addrs[8];
	__u64 cookies[8];

	if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
		goto cleanup;

	skel = kprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
		goto cleanup;

	skel->bss->pid = getpid();
	skel->bss->test_cookie = true;

#define GET_ADDR(__sym, __addr) ({					\
	__addr = ksym_get_addr(__sym);					\
	if (!ASSERT_NEQ(__addr, 0, "ksym_get_addr " #__sym))		\
		goto cleanup;						\
})

	GET_ADDR("bpf_fentry_test1", addrs[0]);
	GET_ADDR("bpf_fentry_test3", addrs[1]);
	GET_ADDR("bpf_fentry_test4", addrs[2]);
	GET_ADDR("bpf_fentry_test5", addrs[3]);
	GET_ADDR("bpf_fentry_test6", addrs[4]);
	GET_ADDR("bpf_fentry_test7", addrs[5]);
	GET_ADDR("bpf_fentry_test2", addrs[6]);
	GET_ADDR("bpf_fentry_test8", addrs[7]);

#undef GET_ADDR

	cookies[0] = 1; /* bpf_fentry_test1 */
	cookies[1] = 2; /* bpf_fentry_test3 */
	cookies[2] = 3; /* bpf_fentry_test4 */
	cookies[3] = 4; /* bpf_fentry_test5 */
	cookies[4] = 5; /* bpf_fentry_test6 */
	cookies[5] = 6; /* bpf_fentry_test7 */
	cookies[6] = 7; /* bpf_fentry_test2 */
	cookies[7] = 8; /* bpf_fentry_test8 */

	opts.kprobe_multi.addrs = (const unsigned long *) &addrs;
	opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
	opts.kprobe_multi.cookies = (const __u64 *) &cookies;
	prog_fd = bpf_program__fd(skel->progs.test_kprobe);

	link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
	if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
		goto cleanup;

	cookies[0] = 8; /* bpf_fentry_test1 */
	cookies[1] = 7; /* bpf_fentry_test3 */
	cookies[2] = 6; /* bpf_fentry_test4 */
	cookies[3] = 5; /* bpf_fentry_test5 */
	cookies[4] = 4; /* bpf_fentry_test6 */
	cookies[5] = 3; /* bpf_fentry_test7 */
	cookies[6] = 2; /* bpf_fentry_test2 */
	cookies[7] = 1; /* bpf_fentry_test8 */

	opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
	prog_fd = bpf_program__fd(skel->progs.test_kretprobe);

	link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
	if (!ASSERT_GE(link2_fd, 0, "link2_fd"))
		goto cleanup;

	kprobe_multi_test_run(skel);

cleanup:
	close(link1_fd);
	close(link2_fd);
	kprobe_multi__destroy(skel);
}
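
/*
 * Note on the raw link API above: BPF_TRACE_KPROBE_MULTI pairs
 * opts.kprobe_multi.cookies[i] with opts.kprobe_multi.addrs[i], so on a hit
 * the BPF program can tell which function fired from the cookie alone.
 * A hedged sketch of the per-hit check, using the hypothetical global
 * kprobe_test1_addr for illustration (the real verification, including the
 * reversed kretprobe cookies, lives in progs/kprobe_multi.c):
 *
 *	SEC("kprobe.multi")
 *	int test_kprobe(struct pt_regs *ctx)
 *	{
 *		__u64 cookie = bpf_get_attach_cookie(ctx);
 *		__u64 addr = bpf_get_func_ip(ctx);
 *
 *		if (addr == kprobe_test1_addr && cookie == 1)
 *			kprobe_test1_result = 1;
 *		return 0;
 *	}
 */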

static void kprobe_multi_attach_api_subtest(void)
{
	struct bpf_link *link1 = NULL, *link2 = NULL;
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct kprobe_multi *skel = NULL;
	const char *syms[8] = {
		"bpf_fentry_test1",
		"bpf_fentry_test3",
		"bpf_fentry_test4",
		"bpf_fentry_test5",
		"bpf_fentry_test6",
		"bpf_fentry_test7",
		"bpf_fentry_test2",
		"bpf_fentry_test8",
	};
	__u64 cookies[8];

	skel = kprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
		goto cleanup;

	skel->bss->pid = getpid();
	skel->bss->test_cookie = true;

	cookies[0] = 1; /* bpf_fentry_test1 */
	cookies[1] = 2; /* bpf_fentry_test3 */
	cookies[2] = 3; /* bpf_fentry_test4 */
	cookies[3] = 4; /* bpf_fentry_test5 */
	cookies[4] = 5; /* bpf_fentry_test6 */
	cookies[5] = 6; /* bpf_fentry_test7 */
	cookies[6] = 7; /* bpf_fentry_test2 */
	cookies[7] = 8; /* bpf_fentry_test8 */

	opts.syms = syms;
	opts.cnt = ARRAY_SIZE(syms);
	opts.cookies = cookies;

	link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
						      NULL, &opts);
	if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
		goto cleanup;

	cookies[0] = 8; /* bpf_fentry_test1 */
	cookies[1] = 7; /* bpf_fentry_test3 */
	cookies[2] = 6; /* bpf_fentry_test4 */
	cookies[3] = 5; /* bpf_fentry_test5 */
	cookies[4] = 4; /* bpf_fentry_test6 */
	cookies[5] = 3; /* bpf_fentry_test7 */
	cookies[6] = 2; /* bpf_fentry_test2 */
	cookies[7] = 1; /* bpf_fentry_test8 */

	opts.retprobe = true;

	link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe,
						      NULL, &opts);
	if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts"))
		goto cleanup;

	kprobe_multi_test_run(skel);

cleanup:
	bpf_link__destroy(link2);
	bpf_link__destroy(link1);
	kprobe_multi__destroy(skel);
}

static void uprobe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL;
	struct bpf_link *retlink1 = NULL, *retlink2 = NULL;
	ssize_t uprobe_offset;

	uprobe_offset = get_uprobe_offset(&trigger_func);
	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
		goto cleanup;

	/* attach two uprobes */
	opts.bpf_cookie = 0x100;
	opts.retprobe = false;
	link1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, 0 /* self pid */,
						"/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	opts.bpf_cookie = 0x200;
	opts.retprobe = false;
	link2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, -1 /* any pid */,
						"/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* attach two uretprobes */
	opts.bpf_cookie = 0x1000;
	opts.retprobe = true;
	retlink1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, -1 /* any pid */,
						   "/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(retlink1, "retlink1"))
		goto cleanup;

	opts.bpf_cookie = 0x2000;
	opts.retprobe = true;
	retlink2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, 0 /* self pid */,
						   "/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(retlink2, "retlink2"))
		goto cleanup;

	/* trigger uprobe && uretprobe */
	trigger_func();

	ASSERT_EQ(skel->bss->uprobe_res, 0x100 | 0x200, "uprobe_res");
	ASSERT_EQ(skel->bss->uretprobe_res, 0x1000 | 0x2000, "uretprobe_res");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(retlink1);
	bpf_link__destroy(retlink2);
}

static void tp_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL, *link3 = NULL;

	/* attach first tp prog */
	opts.bpf_cookie = 0x10000;
	link1 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp1,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	/* attach second tp prog */
	opts.bpf_cookie = 0x20000;
	link2 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp2,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* trigger tracepoints */
	usleep(1);

	ASSERT_EQ(skel->bss->tp_res, 0x10000 | 0x20000, "tp_res1");

	/* now we detach the first prog and attach a third one, which causes
	 * two internal calls to bpf_prog_array_copy(), shuffling
	 * bpf_prog_array items around; we test here that we don't lose track
	 * of the associated bpf_cookies.
	 */
	bpf_link__destroy(link1);
	link1 = NULL;
	kern_sync_rcu();
	skel->bss->tp_res = 0;

	/* attach third tp prog */
	opts.bpf_cookie = 0x40000;
	link3 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp3,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link3, "link3"))
		goto cleanup;

	/* trigger tracepoints */
	usleep(1);

	ASSERT_EQ(skel->bss->tp_res, 0x20000 | 0x40000, "tp_res2");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(link3);
}
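
/*
 * The tracepoint handlers follow the same pattern as the kprobe ones: each
 * handle_tpX ORs bpf_get_attach_cookie(ctx) into tp_res, which is why a
 * cookie that survives the bpf_prog_array_copy() shuffle above shows up in
 * the expected bitmask. A minimal sketch, again assuming no TID filtering
 * (the real programs live in progs/test_bpf_cookie.c):
 *
 *	SEC("tp/syscalls/sys_enter_nanosleep")
 *	int handle_tp1(void *ctx)
 *	{
 *		tp_res |= bpf_get_attach_cookie(ctx);
 *		return 0;
 *	}
 */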

static void burn_cpu(void)
{
	volatile int j = 0;
	cpu_set_t cpu_set;
	int i, err;

	/* generate some branches on cpu 0 */
	CPU_ZERO(&cpu_set);
	CPU_SET(0, &cpu_set);
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
	ASSERT_OK(err, "set_thread_affinity");

	/* spin the loop for a while (random high number) */
	for (i = 0; i < 1000000; ++i)
		++j;
}

static void pe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts);
	struct bpf_link *link = NULL;
	struct perf_event_attr attr;
	int pfd = -1;

	/* create perf event */
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.freq = 1;
	attr.sample_freq = 1000;
	pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
	if (!ASSERT_GE(pfd, 0, "perf_fd"))
		goto cleanup;

	opts.bpf_cookie = 0x100000;
	link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts);
	if (!ASSERT_OK_PTR(link, "link1"))
		goto cleanup;

	burn_cpu(); /* trigger BPF prog */

	ASSERT_EQ(skel->bss->pe_res, 0x100000, "pe_res1");

	/* prevent bpf_link__destroy() closing pfd itself */
	bpf_link__disconnect(link);
	/* close BPF link's FD explicitly */
	close(bpf_link__fd(link));
	/* free up memory used by struct bpf_link */
	bpf_link__destroy(link);
	link = NULL;
	kern_sync_rcu();
	skel->bss->pe_res = 0;

	opts.bpf_cookie = 0x200000;
	link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts);
	if (!ASSERT_OK_PTR(link, "link2"))
		goto cleanup;

	burn_cpu(); /* trigger BPF prog */

	ASSERT_EQ(skel->bss->pe_res, 0x200000, "pe_res2");

cleanup:
	close(pfd);
	bpf_link__destroy(link);
}

static void tracing_subtest(struct test_bpf_cookie *skel)
{
	__u64 cookie;
	int prog_fd;
	int fentry_fd = -1, fexit_fd = -1, fmod_ret_fd = -1;
	LIBBPF_OPTS(bpf_test_run_opts, opts);
	LIBBPF_OPTS(bpf_link_create_opts, link_opts);

	skel->bss->fentry_res = 0;
	skel->bss->fexit_res = 0;

	cookie = 0x10000000000000L;
	prog_fd = bpf_program__fd(skel->progs.fentry_test1);
	link_opts.tracing.cookie = cookie;
	fentry_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, &link_opts);
	if (!ASSERT_GE(fentry_fd, 0, "fentry.link_create"))
		goto cleanup;

	cookie = 0x20000000000000L;
	prog_fd = bpf_program__fd(skel->progs.fexit_test1);
	link_opts.tracing.cookie = cookie;
	fexit_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FEXIT, &link_opts);
	if (!ASSERT_GE(fexit_fd, 0, "fexit.link_create"))
		goto cleanup;

	cookie = 0x30000000000000L;
	prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
	link_opts.tracing.cookie = cookie;
	fmod_ret_fd = bpf_link_create(prog_fd, 0, BPF_MODIFY_RETURN, &link_opts);
	if (!ASSERT_GE(fmod_ret_fd, 0, "fmod_ret.link_create"))
		goto cleanup;

	prog_fd = bpf_program__fd(skel->progs.fentry_test1);
	bpf_prog_test_run_opts(prog_fd, &opts);

	prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
	bpf_prog_test_run_opts(prog_fd, &opts);

	ASSERT_EQ(skel->bss->fentry_res, 0x10000000000000L, "fentry_res");
	ASSERT_EQ(skel->bss->fexit_res, 0x20000000000000L, "fexit_res");
	ASSERT_EQ(skel->bss->fmod_ret_res, 0x30000000000000L, "fmod_ret_res");

cleanup:
	if (fentry_fd >= 0)
		close(fentry_fd);
	if (fexit_fd >= 0)
		close(fexit_fd);
	if (fmod_ret_fd >= 0)
		close(fmod_ret_fd);
}
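
/*
 * For the trampoline-based attachments in tracing_subtest(), the cookie is
 * passed at attach time via link_opts.tracing.cookie and read back on the
 * BPF side with the same bpf_get_attach_cookie() helper. A minimal sketch
 * of the fentry side (illustrative only; the real program is in
 * progs/test_bpf_cookie.c):
 *
 *	SEC("fentry/bpf_fentry_test1")
 *	int BPF_PROG(fentry_test1, int a)
 *	{
 *		fentry_res = bpf_get_attach_cookie(ctx);
 *		return 0;
 *	}
 */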

int stack_mprotect(void);

static void lsm_subtest(struct test_bpf_cookie *skel)
{
	__u64 cookie;
	int prog_fd;
	int lsm_fd = -1;
	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
	int err;

	skel->bss->lsm_res = 0;

	cookie = 0x90000000000090L;
	prog_fd = bpf_program__fd(skel->progs.test_int_hook);
	link_opts.tracing.cookie = cookie;
	lsm_fd = bpf_link_create(prog_fd, 0, BPF_LSM_MAC, &link_opts);
	if (!ASSERT_GE(lsm_fd, 0, "lsm.link_create"))
		goto cleanup;

	err = stack_mprotect();
	if (!ASSERT_EQ(err, -1, "stack_mprotect") ||
	    !ASSERT_EQ(errno, EPERM, "stack_mprotect"))
		goto cleanup;

	usleep(1);

	ASSERT_EQ(skel->bss->lsm_res, 0x90000000000090L, "lsm_res");

cleanup:
	if (lsm_fd >= 0)
		close(lsm_fd);
}

void test_bpf_cookie(void)
{
	struct test_bpf_cookie *skel;

	skel = test_bpf_cookie__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	skel->bss->my_tid = syscall(SYS_gettid);

	if (test__start_subtest("kprobe"))
		kprobe_subtest(skel);
	if (test__start_subtest("multi_kprobe_link_api"))
		kprobe_multi_link_api_subtest();
	if (test__start_subtest("multi_kprobe_attach_api"))
		kprobe_multi_attach_api_subtest();
	if (test__start_subtest("uprobe"))
		uprobe_subtest(skel);
	if (test__start_subtest("tracepoint"))
		tp_subtest(skel);
	if (test__start_subtest("perf_event"))
		pe_subtest(skel);
	if (test__start_subtest("trampoline"))
		tracing_subtest(skel);
	if (test__start_subtest("lsm"))
		lsm_subtest(skel);

	test_bpf_cookie__destroy(skel);
}
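
/*
 * Usage note: this file is built into the BPF selftests' test_progs binary,
 * so the whole suite here typically runs with "./test_progs -t bpf_cookie",
 * and an individual case from test_bpf_cookie() above can be selected with,
 * e.g., "./test_progs -t bpf_cookie/multi_kprobe_attach_api".
 */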