1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ 3 #define _GNU_SOURCE 4 #include <test_progs.h> 5 #include <bpf/btf.h> 6 #include "cap_helpers.h" 7 #include <fcntl.h> 8 #include <sched.h> 9 #include <signal.h> 10 #include <unistd.h> 11 #include <linux/filter.h> 12 #include <linux/unistd.h> 13 #include <linux/mount.h> 14 #include <sys/socket.h> 15 #include <sys/stat.h> 16 #include <sys/syscall.h> 17 #include <sys/un.h> 18 #include "priv_map.skel.h" 19 #include "priv_prog.skel.h" 20 #include "dummy_st_ops_success.skel.h" 21 #include "token_lsm.skel.h" 22 #include "priv_freplace_prog.skel.h" 23 24 static inline int sys_mount(const char *dev_name, const char *dir_name, 25 const char *type, unsigned long flags, 26 const void *data) 27 { 28 return syscall(__NR_mount, dev_name, dir_name, type, flags, data); 29 } 30 31 static inline int sys_fsopen(const char *fsname, unsigned flags) 32 { 33 return syscall(__NR_fsopen, fsname, flags); 34 } 35 36 static inline int sys_fspick(int dfd, const char *path, unsigned flags) 37 { 38 return syscall(__NR_fspick, dfd, path, flags); 39 } 40 41 static inline int sys_fsconfig(int fs_fd, unsigned cmd, const char *key, const void *val, int aux) 42 { 43 return syscall(__NR_fsconfig, fs_fd, cmd, key, val, aux); 44 } 45 46 static inline int sys_fsmount(int fs_fd, unsigned flags, unsigned ms_flags) 47 { 48 return syscall(__NR_fsmount, fs_fd, flags, ms_flags); 49 } 50 51 static inline int sys_move_mount(int from_dfd, const char *from_path, 52 int to_dfd, const char *to_path, 53 unsigned flags) 54 { 55 return syscall(__NR_move_mount, from_dfd, from_path, to_dfd, to_path, flags); 56 } 57 58 static int drop_priv_caps(__u64 *old_caps) 59 { 60 return cap_disable_effective((1ULL << CAP_BPF) | 61 (1ULL << CAP_PERFMON) | 62 (1ULL << CAP_NET_ADMIN) | 63 (1ULL << CAP_SYS_ADMIN), old_caps); 64 } 65 66 static int restore_priv_caps(__u64 old_caps) 67 { 68 return cap_enable_effective(old_caps, NULL); 69 } 70 71 static int set_delegate_mask(int fs_fd, const char *key, __u64 mask, const char *mask_str) 72 { 73 char buf[32]; 74 int err; 75 76 if (!mask_str) { 77 if (mask == ~0ULL) { 78 mask_str = "any"; 79 } else { 80 snprintf(buf, sizeof(buf), "0x%llx", (unsigned long long)mask); 81 mask_str = buf; 82 } 83 } 84 85 err = sys_fsconfig(fs_fd, FSCONFIG_SET_STRING, key, 86 mask_str, 0); 87 if (err < 0) 88 err = -errno; 89 return err; 90 } 91 92 #define zclose(fd) do { if (fd >= 0) close(fd); fd = -1; } while (0) 93 94 struct bpffs_opts { 95 __u64 cmds; 96 __u64 maps; 97 __u64 progs; 98 __u64 attachs; 99 const char *cmds_str; 100 const char *maps_str; 101 const char *progs_str; 102 const char *attachs_str; 103 }; 104 105 static int create_bpffs_fd(void) 106 { 107 int fs_fd; 108 109 /* create VFS context */ 110 fs_fd = sys_fsopen("bpf", 0); 111 ASSERT_GE(fs_fd, 0, "fs_fd"); 112 113 return fs_fd; 114 } 115 116 static int materialize_bpffs_fd(int fs_fd, struct bpffs_opts *opts) 117 { 118 int err; 119 120 /* set up token delegation mount options */ 121 err = set_delegate_mask(fs_fd, "delegate_cmds", opts->cmds, opts->cmds_str); 122 if (!ASSERT_OK(err, "fs_cfg_cmds")) 123 return err; 124 err = set_delegate_mask(fs_fd, "delegate_maps", opts->maps, opts->maps_str); 125 if (!ASSERT_OK(err, "fs_cfg_maps")) 126 return err; 127 err = set_delegate_mask(fs_fd, "delegate_progs", opts->progs, opts->progs_str); 128 if (!ASSERT_OK(err, "fs_cfg_progs")) 129 return err; 130 err = set_delegate_mask(fs_fd, "delegate_attachs", opts->attachs, opts->attachs_str); 131 if (!ASSERT_OK(err, "fs_cfg_attachs")) 132 return err; 133 134 /* instantiate FS object */ 135 err = sys_fsconfig(fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0); 136 if (err < 0) 137 return -errno; 138 139 return 0; 140 } 141 142 /* send FD over Unix domain (AF_UNIX) socket */ 143 static int sendfd(int sockfd, int fd) 144 { 145 struct msghdr msg = {}; 146 struct cmsghdr *cmsg; 147 int fds[1] = { fd }, err; 148 char iobuf[1]; 149 struct iovec io = { 150 .iov_base = iobuf, 151 .iov_len = sizeof(iobuf), 152 }; 153 union { 154 char buf[CMSG_SPACE(sizeof(fds))]; 155 struct cmsghdr align; 156 } u; 157 158 msg.msg_iov = &io; 159 msg.msg_iovlen = 1; 160 msg.msg_control = u.buf; 161 msg.msg_controllen = sizeof(u.buf); 162 cmsg = CMSG_FIRSTHDR(&msg); 163 cmsg->cmsg_level = SOL_SOCKET; 164 cmsg->cmsg_type = SCM_RIGHTS; 165 cmsg->cmsg_len = CMSG_LEN(sizeof(fds)); 166 memcpy(CMSG_DATA(cmsg), fds, sizeof(fds)); 167 168 err = sendmsg(sockfd, &msg, 0); 169 if (err < 0) 170 err = -errno; 171 if (!ASSERT_EQ(err, 1, "sendmsg")) 172 return -EINVAL; 173 174 return 0; 175 } 176 177 /* receive FD over Unix domain (AF_UNIX) socket */ 178 static int recvfd(int sockfd, int *fd) 179 { 180 struct msghdr msg = {}; 181 struct cmsghdr *cmsg; 182 int fds[1], err; 183 char iobuf[1]; 184 struct iovec io = { 185 .iov_base = iobuf, 186 .iov_len = sizeof(iobuf), 187 }; 188 union { 189 char buf[CMSG_SPACE(sizeof(fds))]; 190 struct cmsghdr align; 191 } u; 192 193 msg.msg_iov = &io; 194 msg.msg_iovlen = 1; 195 msg.msg_control = u.buf; 196 msg.msg_controllen = sizeof(u.buf); 197 198 err = recvmsg(sockfd, &msg, 0); 199 if (err < 0) 200 err = -errno; 201 if (!ASSERT_EQ(err, 1, "recvmsg")) 202 return -EINVAL; 203 204 cmsg = CMSG_FIRSTHDR(&msg); 205 if (!ASSERT_OK_PTR(cmsg, "cmsg_null") || 206 !ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(fds)), "cmsg_len") || 207 !ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET, "cmsg_level") || 208 !ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS, "cmsg_type")) 209 return -EINVAL; 210 211 memcpy(fds, CMSG_DATA(cmsg), sizeof(fds)); 212 *fd = fds[0]; 213 214 return 0; 215 } 216 217 static ssize_t write_nointr(int fd, const void *buf, size_t count) 218 { 219 ssize_t ret; 220 221 do { 222 ret = write(fd, buf, count); 223 } while (ret < 0 && errno == EINTR); 224 225 return ret; 226 } 227 228 static int write_file(const char *path, const void *buf, size_t count) 229 { 230 int fd; 231 ssize_t ret; 232 233 fd = open(path, O_WRONLY | O_CLOEXEC | O_NOCTTY | O_NOFOLLOW); 234 if (fd < 0) 235 return -1; 236 237 ret = write_nointr(fd, buf, count); 238 close(fd); 239 if (ret < 0 || (size_t)ret != count) 240 return -1; 241 242 return 0; 243 } 244 245 static int create_and_enter_userns(void) 246 { 247 uid_t uid; 248 gid_t gid; 249 char map[100]; 250 251 uid = getuid(); 252 gid = getgid(); 253 254 if (unshare(CLONE_NEWUSER)) 255 return -1; 256 257 if (write_file("/proc/self/setgroups", "deny", sizeof("deny") - 1) && 258 errno != ENOENT) 259 return -1; 260 261 snprintf(map, sizeof(map), "0 %d 1", uid); 262 if (write_file("/proc/self/uid_map", map, strlen(map))) 263 return -1; 264 265 266 snprintf(map, sizeof(map), "0 %d 1", gid); 267 if (write_file("/proc/self/gid_map", map, strlen(map))) 268 return -1; 269 270 if (setgid(0)) 271 return -1; 272 273 if (setuid(0)) 274 return -1; 275 276 return 0; 277 } 278 279 typedef int (*child_callback_fn)(int bpffs_fd, struct token_lsm *lsm_skel); 280 281 static void child(int sock_fd, struct bpffs_opts *opts, child_callback_fn callback) 282 { 283 int mnt_fd = -1, fs_fd = -1, err = 0, bpffs_fd = -1, token_fd = -1; 284 struct token_lsm *lsm_skel = NULL; 285 char one; 286 287 /* load and attach LSM "policy" before we go into unpriv userns */ 288 lsm_skel = token_lsm__open_and_load(); 289 if (!ASSERT_OK_PTR(lsm_skel, "lsm_skel_load")) { 290 err = -EINVAL; 291 goto cleanup; 292 } 293 lsm_skel->bss->my_pid = getpid(); 294 err = token_lsm__attach(lsm_skel); 295 if (!ASSERT_OK(err, "lsm_skel_attach")) 296 goto cleanup; 297 298 /* setup userns with root mappings */ 299 err = create_and_enter_userns(); 300 if (!ASSERT_OK(err, "create_and_enter_userns")) 301 goto cleanup; 302 303 /* setup mountns to allow creating BPF FS (fsopen("bpf")) from unpriv process */ 304 err = unshare(CLONE_NEWNS); 305 if (!ASSERT_OK(err, "create_mountns")) 306 goto cleanup; 307 308 err = sys_mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0); 309 if (!ASSERT_OK(err, "remount_root")) 310 goto cleanup; 311 312 fs_fd = create_bpffs_fd(); 313 if (!ASSERT_GE(fs_fd, 0, "create_bpffs_fd")) { 314 err = -EINVAL; 315 goto cleanup; 316 } 317 318 /* ensure unprivileged child cannot set delegation options */ 319 err = set_delegate_mask(fs_fd, "delegate_cmds", 0x1, NULL); 320 ASSERT_EQ(err, -EPERM, "delegate_cmd_eperm"); 321 err = set_delegate_mask(fs_fd, "delegate_maps", 0x1, NULL); 322 ASSERT_EQ(err, -EPERM, "delegate_maps_eperm"); 323 err = set_delegate_mask(fs_fd, "delegate_progs", 0x1, NULL); 324 ASSERT_EQ(err, -EPERM, "delegate_progs_eperm"); 325 err = set_delegate_mask(fs_fd, "delegate_attachs", 0x1, NULL); 326 ASSERT_EQ(err, -EPERM, "delegate_attachs_eperm"); 327 328 /* pass BPF FS context object to parent */ 329 err = sendfd(sock_fd, fs_fd); 330 if (!ASSERT_OK(err, "send_fs_fd")) 331 goto cleanup; 332 333 /* wait that the parent reads the fd, does the fsconfig() calls 334 * and send us a signal that it is done 335 */ 336 err = read(sock_fd, &one, sizeof(one)); 337 if (!ASSERT_GE(err, 0, "read_one")) 338 goto cleanup; 339 340 /* avoid mucking around with mount namespaces and mounting at 341 * well-known path, just create O_PATH fd for detached mount 342 */ 343 mnt_fd = sys_fsmount(fs_fd, 0, 0); 344 if (!ASSERT_OK_FD(mnt_fd, "mnt_fd")) 345 goto cleanup; 346 347 /* try to fspick() BPF FS and try to add some delegation options */ 348 fs_fd = sys_fspick(mnt_fd, "", FSPICK_EMPTY_PATH); 349 if (!ASSERT_GE(fs_fd, 0, "bpffs_fspick")) { 350 err = -EINVAL; 351 goto cleanup; 352 } 353 354 /* ensure unprivileged child cannot reconfigure to set delegation options */ 355 err = set_delegate_mask(fs_fd, "delegate_cmds", 0, "any"); 356 if (!ASSERT_EQ(err, -EPERM, "delegate_cmd_eperm_reconfig")) { 357 err = -EINVAL; 358 goto cleanup; 359 } 360 err = set_delegate_mask(fs_fd, "delegate_maps", 0, "any"); 361 if (!ASSERT_EQ(err, -EPERM, "delegate_maps_eperm_reconfig")) { 362 err = -EINVAL; 363 goto cleanup; 364 } 365 err = set_delegate_mask(fs_fd, "delegate_progs", 0, "any"); 366 if (!ASSERT_EQ(err, -EPERM, "delegate_progs_eperm_reconfig")) { 367 err = -EINVAL; 368 goto cleanup; 369 } 370 err = set_delegate_mask(fs_fd, "delegate_attachs", 0, "any"); 371 if (!ASSERT_EQ(err, -EPERM, "delegate_attachs_eperm_reconfig")) { 372 err = -EINVAL; 373 goto cleanup; 374 } 375 zclose(fs_fd); 376 377 bpffs_fd = openat(mnt_fd, ".", 0, O_RDWR); 378 if (!ASSERT_GE(bpffs_fd, 0, "bpffs_open")) { 379 err = -EINVAL; 380 goto cleanup; 381 } 382 383 /* create BPF token FD and pass it to parent for some extra checks */ 384 token_fd = bpf_token_create(bpffs_fd, NULL); 385 if (!ASSERT_GT(token_fd, 0, "child_token_create")) { 386 err = -EINVAL; 387 goto cleanup; 388 } 389 err = sendfd(sock_fd, token_fd); 390 if (!ASSERT_OK(err, "send_token_fd")) 391 goto cleanup; 392 zclose(token_fd); 393 394 /* do custom test logic with customly set up BPF FS instance */ 395 err = callback(bpffs_fd, lsm_skel); 396 if (!ASSERT_OK(err, "test_callback")) 397 goto cleanup; 398 399 err = 0; 400 cleanup: 401 zclose(sock_fd); 402 zclose(mnt_fd); 403 zclose(fs_fd); 404 zclose(bpffs_fd); 405 zclose(token_fd); 406 407 lsm_skel->bss->my_pid = 0; 408 token_lsm__destroy(lsm_skel); 409 410 exit(-err); 411 } 412 413 static int wait_for_pid(pid_t pid) 414 { 415 int status, ret; 416 417 again: 418 ret = waitpid(pid, &status, 0); 419 if (ret == -1) { 420 if (errno == EINTR) 421 goto again; 422 423 return -1; 424 } 425 426 if (!WIFEXITED(status)) 427 return -1; 428 429 return WEXITSTATUS(status); 430 } 431 432 static void parent(int child_pid, struct bpffs_opts *bpffs_opts, int sock_fd) 433 { 434 int fs_fd = -1, token_fd = -1, err; 435 char one = 1; 436 437 err = recvfd(sock_fd, &fs_fd); 438 if (!ASSERT_OK(err, "recv_bpffs_fd")) 439 goto cleanup; 440 441 err = materialize_bpffs_fd(fs_fd, bpffs_opts); 442 if (!ASSERT_GE(err, 0, "materialize_bpffs_fd")) { 443 err = -EINVAL; 444 goto cleanup; 445 } 446 447 /* notify the child that we did the fsconfig() calls and it can proceed. */ 448 err = write(sock_fd, &one, sizeof(one)); 449 if (!ASSERT_EQ(err, sizeof(one), "send_one")) 450 goto cleanup; 451 zclose(fs_fd); 452 453 /* receive BPF token FD back from child for some extra tests */ 454 err = recvfd(sock_fd, &token_fd); 455 if (!ASSERT_OK(err, "recv_token_fd")) 456 goto cleanup; 457 458 err = wait_for_pid(child_pid); 459 ASSERT_OK(err, "waitpid_child"); 460 461 cleanup: 462 zclose(sock_fd); 463 zclose(fs_fd); 464 zclose(token_fd); 465 466 if (child_pid > 0) 467 (void)kill(child_pid, SIGKILL); 468 } 469 470 static void subtest_userns(struct bpffs_opts *bpffs_opts, 471 child_callback_fn child_cb) 472 { 473 int sock_fds[2] = { -1, -1 }; 474 int child_pid = 0, err; 475 476 err = socketpair(AF_UNIX, SOCK_STREAM, 0, sock_fds); 477 if (!ASSERT_OK(err, "socketpair")) 478 goto cleanup; 479 480 child_pid = fork(); 481 if (!ASSERT_GE(child_pid, 0, "fork")) 482 goto cleanup; 483 484 if (child_pid == 0) { 485 zclose(sock_fds[0]); 486 return child(sock_fds[1], bpffs_opts, child_cb); 487 488 } else { 489 zclose(sock_fds[1]); 490 return parent(child_pid, bpffs_opts, sock_fds[0]); 491 } 492 493 cleanup: 494 zclose(sock_fds[0]); 495 zclose(sock_fds[1]); 496 if (child_pid > 0) 497 (void)kill(child_pid, SIGKILL); 498 } 499 500 static int userns_map_create(int mnt_fd, struct token_lsm *lsm_skel) 501 { 502 LIBBPF_OPTS(bpf_map_create_opts, map_opts); 503 int err, token_fd = -1, map_fd = -1; 504 __u64 old_caps = 0; 505 506 /* create BPF token from BPF FS mount */ 507 token_fd = bpf_token_create(mnt_fd, NULL); 508 if (!ASSERT_GT(token_fd, 0, "token_create")) { 509 err = -EINVAL; 510 goto cleanup; 511 } 512 513 /* while inside non-init userns, we need both a BPF token *and* 514 * CAP_BPF inside current userns to create privileged map; let's test 515 * that neither BPF token alone nor namespaced CAP_BPF is sufficient 516 */ 517 err = drop_priv_caps(&old_caps); 518 if (!ASSERT_OK(err, "drop_caps")) 519 goto cleanup; 520 521 /* no token, no CAP_BPF -> fail */ 522 map_opts.map_flags = 0; 523 map_opts.token_fd = 0; 524 map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_wo_bpf", 0, 8, 1, &map_opts); 525 if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_wo_cap_bpf_should_fail")) { 526 err = -EINVAL; 527 goto cleanup; 528 } 529 530 /* token without CAP_BPF -> fail */ 531 map_opts.map_flags = BPF_F_TOKEN_FD; 532 map_opts.token_fd = token_fd; 533 map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_wo_bpf", 0, 8, 1, &map_opts); 534 if (!ASSERT_LT(map_fd, 0, "stack_map_w_token_wo_cap_bpf_should_fail")) { 535 err = -EINVAL; 536 goto cleanup; 537 } 538 539 /* get back effective local CAP_BPF (and CAP_SYS_ADMIN) */ 540 err = restore_priv_caps(old_caps); 541 if (!ASSERT_OK(err, "restore_caps")) 542 goto cleanup; 543 544 /* CAP_BPF without token -> fail */ 545 map_opts.map_flags = 0; 546 map_opts.token_fd = 0; 547 map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_w_bpf", 0, 8, 1, &map_opts); 548 if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_w_cap_bpf_should_fail")) { 549 err = -EINVAL; 550 goto cleanup; 551 } 552 553 /* finally, namespaced CAP_BPF + token -> success */ 554 map_opts.map_flags = BPF_F_TOKEN_FD; 555 map_opts.token_fd = token_fd; 556 map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_w_bpf", 0, 8, 1, &map_opts); 557 if (!ASSERT_GT(map_fd, 0, "stack_map_w_token_w_cap_bpf")) { 558 err = -EINVAL; 559 goto cleanup; 560 } 561 562 cleanup: 563 zclose(token_fd); 564 zclose(map_fd); 565 return err; 566 } 567 568 static int userns_btf_load(int mnt_fd, struct token_lsm *lsm_skel) 569 { 570 LIBBPF_OPTS(bpf_btf_load_opts, btf_opts); 571 int err, token_fd = -1, btf_fd = -1; 572 const void *raw_btf_data; 573 struct btf *btf = NULL; 574 __u32 raw_btf_size; 575 __u64 old_caps = 0; 576 577 /* create BPF token from BPF FS mount */ 578 token_fd = bpf_token_create(mnt_fd, NULL); 579 if (!ASSERT_GT(token_fd, 0, "token_create")) { 580 err = -EINVAL; 581 goto cleanup; 582 } 583 584 /* while inside non-init userns, we need both a BPF token *and* 585 * CAP_BPF inside current userns to create privileged map; let's test 586 * that neither BPF token alone nor namespaced CAP_BPF is sufficient 587 */ 588 err = drop_priv_caps(&old_caps); 589 if (!ASSERT_OK(err, "drop_caps")) 590 goto cleanup; 591 592 /* setup a trivial BTF data to load to the kernel */ 593 btf = btf__new_empty(); 594 if (!ASSERT_OK_PTR(btf, "empty_btf")) 595 goto cleanup; 596 597 ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type"); 598 599 raw_btf_data = btf__raw_data(btf, &raw_btf_size); 600 if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data")) 601 goto cleanup; 602 603 /* no token + no CAP_BPF -> failure */ 604 btf_opts.btf_flags = 0; 605 btf_opts.token_fd = 0; 606 btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts); 607 if (!ASSERT_LT(btf_fd, 0, "no_token_no_cap_should_fail")) 608 goto cleanup; 609 610 /* token + no CAP_BPF -> failure */ 611 btf_opts.btf_flags = BPF_F_TOKEN_FD; 612 btf_opts.token_fd = token_fd; 613 btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts); 614 if (!ASSERT_LT(btf_fd, 0, "token_no_cap_should_fail")) 615 goto cleanup; 616 617 /* get back effective local CAP_BPF (and CAP_SYS_ADMIN) */ 618 err = restore_priv_caps(old_caps); 619 if (!ASSERT_OK(err, "restore_caps")) 620 goto cleanup; 621 622 /* token + CAP_BPF -> success */ 623 btf_opts.btf_flags = BPF_F_TOKEN_FD; 624 btf_opts.token_fd = token_fd; 625 btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts); 626 if (!ASSERT_GT(btf_fd, 0, "token_and_cap_success")) 627 goto cleanup; 628 629 err = 0; 630 cleanup: 631 btf__free(btf); 632 zclose(btf_fd); 633 zclose(token_fd); 634 return err; 635 } 636 637 static int userns_prog_load(int mnt_fd, struct token_lsm *lsm_skel) 638 { 639 LIBBPF_OPTS(bpf_prog_load_opts, prog_opts); 640 int err, token_fd = -1, prog_fd = -1; 641 struct bpf_insn insns[] = { 642 /* bpf_jiffies64() requires CAP_BPF */ 643 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64), 644 /* bpf_get_current_task() requires CAP_PERFMON */ 645 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_current_task), 646 /* r0 = 0; exit; */ 647 BPF_MOV64_IMM(BPF_REG_0, 0), 648 BPF_EXIT_INSN(), 649 }; 650 size_t insn_cnt = ARRAY_SIZE(insns); 651 __u64 old_caps = 0; 652 653 /* create BPF token from BPF FS mount */ 654 token_fd = bpf_token_create(mnt_fd, NULL); 655 if (!ASSERT_GT(token_fd, 0, "token_create")) { 656 err = -EINVAL; 657 goto cleanup; 658 } 659 660 /* validate we can successfully load BPF program with token; this 661 * being XDP program (CAP_NET_ADMIN) using bpf_jiffies64() (CAP_BPF) 662 * and bpf_get_current_task() (CAP_PERFMON) helpers validates we have 663 * BPF token wired properly in a bunch of places in the kernel 664 */ 665 prog_opts.prog_flags = BPF_F_TOKEN_FD; 666 prog_opts.token_fd = token_fd; 667 prog_opts.expected_attach_type = BPF_XDP; 668 prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL", 669 insns, insn_cnt, &prog_opts); 670 if (!ASSERT_GT(prog_fd, 0, "prog_fd")) { 671 err = -EPERM; 672 goto cleanup; 673 } 674 675 /* no token + caps -> failure */ 676 prog_opts.prog_flags = 0; 677 prog_opts.token_fd = 0; 678 prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL", 679 insns, insn_cnt, &prog_opts); 680 if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) { 681 err = -EPERM; 682 goto cleanup; 683 } 684 685 err = drop_priv_caps(&old_caps); 686 if (!ASSERT_OK(err, "drop_caps")) 687 goto cleanup; 688 689 /* no caps + token -> failure */ 690 prog_opts.prog_flags = BPF_F_TOKEN_FD; 691 prog_opts.token_fd = token_fd; 692 prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL", 693 insns, insn_cnt, &prog_opts); 694 if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) { 695 err = -EPERM; 696 goto cleanup; 697 } 698 699 /* no caps + no token -> definitely a failure */ 700 prog_opts.prog_flags = 0; 701 prog_opts.token_fd = 0; 702 prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL", 703 insns, insn_cnt, &prog_opts); 704 if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) { 705 err = -EPERM; 706 goto cleanup; 707 } 708 709 err = 0; 710 cleanup: 711 zclose(prog_fd); 712 zclose(token_fd); 713 return err; 714 } 715 716 static int userns_obj_priv_map(int mnt_fd, struct token_lsm *lsm_skel) 717 { 718 LIBBPF_OPTS(bpf_object_open_opts, opts); 719 char buf[256]; 720 struct priv_map *skel; 721 int err; 722 723 skel = priv_map__open_and_load(); 724 if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) { 725 priv_map__destroy(skel); 726 return -EINVAL; 727 } 728 729 /* use bpf_token_path to provide BPF FS path */ 730 snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd); 731 opts.bpf_token_path = buf; 732 skel = priv_map__open_opts(&opts); 733 if (!ASSERT_OK_PTR(skel, "obj_token_path_open")) 734 return -EINVAL; 735 736 err = priv_map__load(skel); 737 priv_map__destroy(skel); 738 if (!ASSERT_OK(err, "obj_token_path_load")) 739 return -EINVAL; 740 741 return 0; 742 } 743 744 static int userns_obj_priv_prog(int mnt_fd, struct token_lsm *lsm_skel) 745 { 746 LIBBPF_OPTS(bpf_object_open_opts, opts); 747 char buf[256]; 748 struct priv_prog *skel; 749 int err; 750 751 skel = priv_prog__open_and_load(); 752 if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) { 753 priv_prog__destroy(skel); 754 return -EINVAL; 755 } 756 757 /* use bpf_token_path to provide BPF FS path */ 758 snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd); 759 opts.bpf_token_path = buf; 760 skel = priv_prog__open_opts(&opts); 761 if (!ASSERT_OK_PTR(skel, "obj_token_path_open")) 762 return -EINVAL; 763 err = priv_prog__load(skel); 764 priv_prog__destroy(skel); 765 if (!ASSERT_OK(err, "obj_token_path_load")) 766 return -EINVAL; 767 768 /* provide BPF token, but reject bpf_token_capable() with LSM */ 769 lsm_skel->bss->reject_capable = true; 770 lsm_skel->bss->reject_cmd = false; 771 skel = priv_prog__open_opts(&opts); 772 if (!ASSERT_OK_PTR(skel, "obj_token_lsm_reject_cap_open")) 773 return -EINVAL; 774 err = priv_prog__load(skel); 775 priv_prog__destroy(skel); 776 if (!ASSERT_ERR(err, "obj_token_lsm_reject_cap_load")) 777 return -EINVAL; 778 779 /* provide BPF token, but reject bpf_token_cmd() with LSM */ 780 lsm_skel->bss->reject_capable = false; 781 lsm_skel->bss->reject_cmd = true; 782 skel = priv_prog__open_opts(&opts); 783 if (!ASSERT_OK_PTR(skel, "obj_token_lsm_reject_cmd_open")) 784 return -EINVAL; 785 err = priv_prog__load(skel); 786 priv_prog__destroy(skel); 787 if (!ASSERT_ERR(err, "obj_token_lsm_reject_cmd_load")) 788 return -EINVAL; 789 790 return 0; 791 } 792 793 static int userns_obj_priv_freplace_setup(int mnt_fd, struct priv_freplace_prog **fr_skel, 794 struct priv_prog **skel, int *tgt_fd) 795 { 796 LIBBPF_OPTS(bpf_object_open_opts, opts); 797 int err; 798 char buf[256]; 799 800 /* use bpf_token_path to provide BPF FS path */ 801 snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd); 802 opts.bpf_token_path = buf; 803 *skel = priv_prog__open_opts(&opts); 804 if (!ASSERT_OK_PTR(*skel, "priv_prog__open_opts")) 805 return -EINVAL; 806 err = priv_prog__load(*skel); 807 if (!ASSERT_OK(err, "priv_prog__load")) 808 return -EINVAL; 809 810 *fr_skel = priv_freplace_prog__open_opts(&opts); 811 if (!ASSERT_OK_PTR(*skel, "priv_freplace_prog__open_opts")) 812 return -EINVAL; 813 814 *tgt_fd = bpf_program__fd((*skel)->progs.xdp_prog1); 815 return 0; 816 } 817 818 /* Verify that freplace works from user namespace, because bpf token is loaded 819 * in bpf_object__prepare 820 */ 821 static int userns_obj_priv_freplace_prog(int mnt_fd, struct token_lsm *lsm_skel) 822 { 823 struct priv_freplace_prog *fr_skel = NULL; 824 struct priv_prog *skel = NULL; 825 int err, tgt_fd; 826 827 err = userns_obj_priv_freplace_setup(mnt_fd, &fr_skel, &skel, &tgt_fd); 828 if (!ASSERT_OK(err, "setup")) 829 goto out; 830 831 err = bpf_object__prepare(fr_skel->obj); 832 if (!ASSERT_OK(err, "freplace__prepare")) 833 goto out; 834 835 err = bpf_program__set_attach_target(fr_skel->progs.new_xdp_prog2, tgt_fd, "xdp_prog1"); 836 if (!ASSERT_OK(err, "set_attach_target")) 837 goto out; 838 839 err = priv_freplace_prog__load(fr_skel); 840 ASSERT_OK(err, "priv_freplace_prog__load"); 841 842 out: 843 priv_freplace_prog__destroy(fr_skel); 844 priv_prog__destroy(skel); 845 return err; 846 } 847 848 /* Verify that replace fails to set attach target from user namespace without bpf token */ 849 static int userns_obj_priv_freplace_prog_fail(int mnt_fd, struct token_lsm *lsm_skel) 850 { 851 struct priv_freplace_prog *fr_skel = NULL; 852 struct priv_prog *skel = NULL; 853 int err, tgt_fd; 854 855 err = userns_obj_priv_freplace_setup(mnt_fd, &fr_skel, &skel, &tgt_fd); 856 if (!ASSERT_OK(err, "setup")) 857 goto out; 858 859 err = bpf_program__set_attach_target(fr_skel->progs.new_xdp_prog2, tgt_fd, "xdp_prog1"); 860 if (ASSERT_ERR(err, "attach fails")) 861 err = 0; 862 else 863 err = -EINVAL; 864 865 out: 866 priv_freplace_prog__destroy(fr_skel); 867 priv_prog__destroy(skel); 868 return err; 869 } 870 871 /* this test is called with BPF FS that doesn't delegate BPF_BTF_LOAD command, 872 * which should cause struct_ops application to fail, as BTF won't be uploaded 873 * into the kernel, even if STRUCT_OPS programs themselves are allowed 874 */ 875 static int validate_struct_ops_load(int mnt_fd, bool expect_success) 876 { 877 LIBBPF_OPTS(bpf_object_open_opts, opts); 878 char buf[256]; 879 struct dummy_st_ops_success *skel; 880 int err; 881 882 snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd); 883 opts.bpf_token_path = buf; 884 skel = dummy_st_ops_success__open_opts(&opts); 885 if (!ASSERT_OK_PTR(skel, "obj_token_path_open")) 886 return -EINVAL; 887 888 err = dummy_st_ops_success__load(skel); 889 dummy_st_ops_success__destroy(skel); 890 if (expect_success) { 891 if (!ASSERT_OK(err, "obj_token_path_load")) 892 return -EINVAL; 893 } else /* expect failure */ { 894 if (!ASSERT_ERR(err, "obj_token_path_load")) 895 return -EINVAL; 896 } 897 898 return 0; 899 } 900 901 static int userns_obj_priv_btf_fail(int mnt_fd, struct token_lsm *lsm_skel) 902 { 903 return validate_struct_ops_load(mnt_fd, false /* should fail */); 904 } 905 906 static int userns_obj_priv_btf_success(int mnt_fd, struct token_lsm *lsm_skel) 907 { 908 return validate_struct_ops_load(mnt_fd, true /* should succeed */); 909 } 910 911 static const char *token_bpffs_custom_dir() 912 { 913 return getenv("BPF_SELFTESTS_BPF_TOKEN_DIR") ?: "/tmp/bpf-token-fs"; 914 } 915 916 #define TOKEN_ENVVAR "LIBBPF_BPF_TOKEN_PATH" 917 918 static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel) 919 { 920 LIBBPF_OPTS(bpf_object_open_opts, opts); 921 struct dummy_st_ops_success *skel; 922 int err; 923 924 /* before we mount BPF FS with token delegation, struct_ops skeleton 925 * should fail to load 926 */ 927 skel = dummy_st_ops_success__open_and_load(); 928 if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) { 929 dummy_st_ops_success__destroy(skel); 930 return -EINVAL; 931 } 932 933 /* mount custom BPF FS over /sys/fs/bpf so that libbpf can create BPF 934 * token automatically and implicitly 935 */ 936 err = sys_move_mount(mnt_fd, "", AT_FDCWD, "/sys/fs/bpf", MOVE_MOUNT_F_EMPTY_PATH); 937 if (!ASSERT_OK(err, "move_mount_bpffs")) 938 return -EINVAL; 939 940 /* disable implicit BPF token creation by setting 941 * LIBBPF_BPF_TOKEN_PATH envvar to empty value, load should fail 942 */ 943 err = setenv(TOKEN_ENVVAR, "", 1 /*overwrite*/); 944 if (!ASSERT_OK(err, "setenv_token_path")) 945 return -EINVAL; 946 skel = dummy_st_ops_success__open_and_load(); 947 if (!ASSERT_ERR_PTR(skel, "obj_token_envvar_disabled_load")) { 948 unsetenv(TOKEN_ENVVAR); 949 dummy_st_ops_success__destroy(skel); 950 return -EINVAL; 951 } 952 unsetenv(TOKEN_ENVVAR); 953 954 /* now the same struct_ops skeleton should succeed thanks to libbpf 955 * creating BPF token from /sys/fs/bpf mount point 956 */ 957 skel = dummy_st_ops_success__open_and_load(); 958 if (!ASSERT_OK_PTR(skel, "obj_implicit_token_load")) 959 return -EINVAL; 960 961 dummy_st_ops_success__destroy(skel); 962 963 /* now disable implicit token through empty bpf_token_path, should fail */ 964 opts.bpf_token_path = ""; 965 skel = dummy_st_ops_success__open_opts(&opts); 966 if (!ASSERT_OK_PTR(skel, "obj_empty_token_path_open")) 967 return -EINVAL; 968 969 err = dummy_st_ops_success__load(skel); 970 dummy_st_ops_success__destroy(skel); 971 if (!ASSERT_ERR(err, "obj_empty_token_path_load")) 972 return -EINVAL; 973 974 return 0; 975 } 976 977 static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *lsm_skel) 978 { 979 const char *custom_dir = token_bpffs_custom_dir(); 980 LIBBPF_OPTS(bpf_object_open_opts, opts); 981 struct dummy_st_ops_success *skel; 982 int err; 983 984 /* before we mount BPF FS with token delegation, struct_ops skeleton 985 * should fail to load 986 */ 987 skel = dummy_st_ops_success__open_and_load(); 988 if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) { 989 dummy_st_ops_success__destroy(skel); 990 return -EINVAL; 991 } 992 993 /* mount custom BPF FS over custom location, so libbpf can't create 994 * BPF token implicitly, unless pointed to it through 995 * LIBBPF_BPF_TOKEN_PATH envvar 996 */ 997 rmdir(custom_dir); 998 if (!ASSERT_OK(mkdir(custom_dir, 0777), "mkdir_bpffs_custom")) 999 goto err_out; 1000 err = sys_move_mount(mnt_fd, "", AT_FDCWD, custom_dir, MOVE_MOUNT_F_EMPTY_PATH); 1001 if (!ASSERT_OK(err, "move_mount_bpffs")) 1002 goto err_out; 1003 1004 /* even though we have BPF FS with delegation, it's not at default 1005 * /sys/fs/bpf location, so we still fail to load until envvar is set up 1006 */ 1007 skel = dummy_st_ops_success__open_and_load(); 1008 if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load2")) { 1009 dummy_st_ops_success__destroy(skel); 1010 goto err_out; 1011 } 1012 1013 err = setenv(TOKEN_ENVVAR, custom_dir, 1 /*overwrite*/); 1014 if (!ASSERT_OK(err, "setenv_token_path")) 1015 goto err_out; 1016 1017 /* now the same struct_ops skeleton should succeed thanks to libbpf 1018 * creating BPF token from custom mount point 1019 */ 1020 skel = dummy_st_ops_success__open_and_load(); 1021 if (!ASSERT_OK_PTR(skel, "obj_implicit_token_load")) 1022 goto err_out; 1023 1024 dummy_st_ops_success__destroy(skel); 1025 1026 /* now disable implicit token through empty bpf_token_path, envvar 1027 * will be ignored, should fail 1028 */ 1029 opts.bpf_token_path = ""; 1030 skel = dummy_st_ops_success__open_opts(&opts); 1031 if (!ASSERT_OK_PTR(skel, "obj_empty_token_path_open")) 1032 goto err_out; 1033 1034 err = dummy_st_ops_success__load(skel); 1035 dummy_st_ops_success__destroy(skel); 1036 if (!ASSERT_ERR(err, "obj_empty_token_path_load")) 1037 goto err_out; 1038 1039 rmdir(custom_dir); 1040 unsetenv(TOKEN_ENVVAR); 1041 return 0; 1042 err_out: 1043 rmdir(custom_dir); 1044 unsetenv(TOKEN_ENVVAR); 1045 return -EINVAL; 1046 } 1047 1048 #define bit(n) (1ULL << (n)) 1049 1050 static int userns_bpf_token_info(int mnt_fd, struct token_lsm *lsm_skel) 1051 { 1052 int err, token_fd = -1; 1053 struct bpf_token_info info; 1054 u32 len = sizeof(struct bpf_token_info); 1055 1056 /* create BPF token from BPF FS mount */ 1057 token_fd = bpf_token_create(mnt_fd, NULL); 1058 if (!ASSERT_GT(token_fd, 0, "token_create")) { 1059 err = -EINVAL; 1060 goto cleanup; 1061 } 1062 1063 memset(&info, 0, len); 1064 err = bpf_obj_get_info_by_fd(token_fd, &info, &len); 1065 if (!ASSERT_ERR(err, "bpf_obj_get_token_info")) 1066 goto cleanup; 1067 if (!ASSERT_EQ(info.allowed_cmds, bit(BPF_MAP_CREATE), "token_info_cmds_map_create")) { 1068 err = -EINVAL; 1069 goto cleanup; 1070 } 1071 if (!ASSERT_EQ(info.allowed_progs, bit(BPF_PROG_TYPE_XDP), "token_info_progs_xdp")) { 1072 err = -EINVAL; 1073 goto cleanup; 1074 } 1075 1076 /* The BPF_PROG_TYPE_EXT is not set in token */ 1077 if (ASSERT_EQ(info.allowed_progs, bit(BPF_PROG_TYPE_EXT), "token_info_progs_ext")) 1078 err = -EINVAL; 1079 1080 cleanup: 1081 zclose(token_fd); 1082 return err; 1083 } 1084 1085 void test_token(void) 1086 { 1087 if (test__start_subtest("map_token")) { 1088 struct bpffs_opts opts = { 1089 .cmds_str = "map_create", 1090 .maps_str = "stack", 1091 }; 1092 1093 subtest_userns(&opts, userns_map_create); 1094 } 1095 if (test__start_subtest("btf_token")) { 1096 struct bpffs_opts opts = { 1097 .cmds = 1ULL << BPF_BTF_LOAD, 1098 }; 1099 1100 subtest_userns(&opts, userns_btf_load); 1101 } 1102 if (test__start_subtest("prog_token")) { 1103 struct bpffs_opts opts = { 1104 .cmds_str = "PROG_LOAD", 1105 .progs_str = "XDP", 1106 .attachs_str = "xdp", 1107 }; 1108 1109 subtest_userns(&opts, userns_prog_load); 1110 } 1111 if (test__start_subtest("obj_priv_map")) { 1112 struct bpffs_opts opts = { 1113 .cmds = bit(BPF_MAP_CREATE), 1114 .maps = bit(BPF_MAP_TYPE_QUEUE), 1115 }; 1116 1117 subtest_userns(&opts, userns_obj_priv_map); 1118 } 1119 if (test__start_subtest("obj_priv_prog")) { 1120 struct bpffs_opts opts = { 1121 .cmds = bit(BPF_PROG_LOAD), 1122 .progs = bit(BPF_PROG_TYPE_XDP), 1123 .attachs = ~0ULL, 1124 }; 1125 1126 subtest_userns(&opts, userns_obj_priv_prog); 1127 } 1128 if (test__start_subtest("obj_priv_freplace_prog")) { 1129 struct bpffs_opts opts = { 1130 .cmds = bit(BPF_BTF_LOAD) | bit(BPF_PROG_LOAD) | bit(BPF_BTF_GET_FD_BY_ID), 1131 .progs = bit(BPF_PROG_TYPE_EXT) | bit(BPF_PROG_TYPE_XDP), 1132 .attachs = ~0ULL, 1133 }; 1134 subtest_userns(&opts, userns_obj_priv_freplace_prog); 1135 } 1136 if (test__start_subtest("obj_priv_freplace_prog_fail")) { 1137 struct bpffs_opts opts = { 1138 .cmds = bit(BPF_BTF_LOAD) | bit(BPF_PROG_LOAD) | bit(BPF_BTF_GET_FD_BY_ID), 1139 .progs = bit(BPF_PROG_TYPE_EXT) | bit(BPF_PROG_TYPE_XDP), 1140 .attachs = ~0ULL, 1141 }; 1142 subtest_userns(&opts, userns_obj_priv_freplace_prog_fail); 1143 } 1144 if (test__start_subtest("obj_priv_btf_fail")) { 1145 struct bpffs_opts opts = { 1146 /* disallow BTF loading */ 1147 .cmds = bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD), 1148 .maps = bit(BPF_MAP_TYPE_STRUCT_OPS), 1149 .progs = bit(BPF_PROG_TYPE_STRUCT_OPS), 1150 .attachs = ~0ULL, 1151 }; 1152 1153 subtest_userns(&opts, userns_obj_priv_btf_fail); 1154 } 1155 if (test__start_subtest("obj_priv_btf_success")) { 1156 struct bpffs_opts opts = { 1157 /* allow BTF loading */ 1158 .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD), 1159 .maps = bit(BPF_MAP_TYPE_STRUCT_OPS), 1160 .progs = bit(BPF_PROG_TYPE_STRUCT_OPS), 1161 .attachs = ~0ULL, 1162 }; 1163 1164 subtest_userns(&opts, userns_obj_priv_btf_success); 1165 } 1166 if (test__start_subtest("obj_priv_implicit_token")) { 1167 struct bpffs_opts opts = { 1168 /* allow BTF loading */ 1169 .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD), 1170 .maps = bit(BPF_MAP_TYPE_STRUCT_OPS), 1171 .progs = bit(BPF_PROG_TYPE_STRUCT_OPS), 1172 .attachs = ~0ULL, 1173 }; 1174 1175 subtest_userns(&opts, userns_obj_priv_implicit_token); 1176 } 1177 if (test__start_subtest("obj_priv_implicit_token_envvar")) { 1178 struct bpffs_opts opts = { 1179 /* allow BTF loading */ 1180 .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD), 1181 .maps = bit(BPF_MAP_TYPE_STRUCT_OPS), 1182 .progs = bit(BPF_PROG_TYPE_STRUCT_OPS), 1183 .attachs = ~0ULL, 1184 }; 1185 1186 subtest_userns(&opts, userns_obj_priv_implicit_token_envvar); 1187 } 1188 if (test__start_subtest("bpf_token_info")) { 1189 struct bpffs_opts opts = { 1190 .cmds = bit(BPF_MAP_CREATE), 1191 .progs = bit(BPF_PROG_TYPE_XDP), 1192 .attachs = ~0ULL, 1193 }; 1194 1195 subtest_userns(&opts, userns_bpf_token_info); 1196 } 1197 } 1198