xref: /linux/tools/testing/selftests/bpf/prog_tests/token.c (revision 969fb456ffb43d87894a295dbe6a0a722691552a)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#define _GNU_SOURCE
#include <bpf/btf.h>
#include <fcntl.h>
#include <sched.h>
#include <signal.h>
#include <unistd.h>
#include <linux/filter.h>
#include <linux/unistd.h>
#include <linux/mount.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/un.h>

#include "bpf_util.h"
#include "cap_helpers.h"
#include "sysctl_helpers.h"
#include "test_progs.h"
#include "trace_helpers.h"

#include "priv_map.skel.h"
#include "priv_prog.skel.h"
#include "dummy_st_ops_success.skel.h"
#include "token_kallsyms.skel.h"
#include "token_lsm.skel.h"
#include "priv_freplace_prog.skel.h"

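/* Thin wrappers for the new mount API syscalls (fsopen(), fsconfig(),
 * fspick(), fsmount(), move_mount()); they go through syscall() directly,
 * as libc wrappers for these may not be available everywhere.
 */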
static inline int sys_mount(const char *dev_name, const char *dir_name,
			    const char *type, unsigned long flags,
			    const void *data)
{
	return syscall(__NR_mount, dev_name, dir_name, type, flags, data);
}

static inline int sys_fsopen(const char *fsname, unsigned flags)
{
	return syscall(__NR_fsopen, fsname, flags);
}

static inline int sys_fspick(int dfd, const char *path, unsigned flags)
{
	return syscall(__NR_fspick, dfd, path, flags);
}

static inline int sys_fsconfig(int fs_fd, unsigned cmd, const char *key, const void *val, int aux)
{
	return syscall(__NR_fsconfig, fs_fd, cmd, key, val, aux);
}

static inline int sys_fsmount(int fs_fd, unsigned flags, unsigned ms_flags)
{
	return syscall(__NR_fsmount, fs_fd, flags, ms_flags);
}

static inline int sys_move_mount(int from_dfd, const char *from_path,
				 int to_dfd, const char *to_path,
				 unsigned flags)
{
	return syscall(__NR_move_mount, from_dfd, from_path, to_dfd, to_path, flags);
}

static int drop_priv_caps(__u64 *old_caps)
{
	return cap_disable_effective((1ULL << CAP_BPF) |
				     (1ULL << CAP_PERFMON) |
				     (1ULL << CAP_NET_ADMIN) |
				     (1ULL << CAP_SYS_ADMIN), old_caps);
}

static int restore_priv_caps(__u64 old_caps)
{
	return cap_enable_effective(old_caps, NULL);
}

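/* Set one delegate_{cmds,maps,progs,attachs} mount option on the BPF FS
 * context fd; the mask can be given numerically (hex) or as a string,
 * where "any" is the shorthand for ~0ULL.
 */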
static int set_delegate_mask(int fs_fd, const char *key, __u64 mask, const char *mask_str)
{
	char buf[32];
	int err;

	if (!mask_str) {
		if (mask == ~0ULL) {
			mask_str = "any";
		} else {
			snprintf(buf, sizeof(buf), "0x%llx", (unsigned long long)mask);
			mask_str = buf;
		}
	}

	err = sys_fsconfig(fs_fd, FSCONFIG_SET_STRING, key,
			   mask_str, 0);
	if (err < 0)
		err = -errno;
	return err;
}

#define zclose(fd) do { if ((fd) >= 0) close(fd); (fd) = -1; } while (0)
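/* per-subtest delegation masks for a BPF FS instance; each mask has an
 * optional string form (e.g., "map_create" or "any") which, when set,
 * takes precedence over the numeric one
 */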
struct bpffs_opts {
	__u64 cmds;
	__u64 maps;
	__u64 progs;
	__u64 attachs;
	const char *cmds_str;
	const char *maps_str;
	const char *progs_str;
	const char *attachs_str;
};

static int create_bpffs_fd(void)
{
	int fs_fd;

	/* create VFS context */
	fs_fd = sys_fsopen("bpf", 0);
	ASSERT_GE(fs_fd, 0, "fs_fd");

	return fs_fd;
}

static int materialize_bpffs_fd(int fs_fd, struct bpffs_opts *opts)
{
	int err;

	/* set up token delegation mount options */
	err = set_delegate_mask(fs_fd, "delegate_cmds", opts->cmds, opts->cmds_str);
	if (!ASSERT_OK(err, "fs_cfg_cmds"))
		return err;
	err = set_delegate_mask(fs_fd, "delegate_maps", opts->maps, opts->maps_str);
	if (!ASSERT_OK(err, "fs_cfg_maps"))
		return err;
	err = set_delegate_mask(fs_fd, "delegate_progs", opts->progs, opts->progs_str);
	if (!ASSERT_OK(err, "fs_cfg_progs"))
		return err;
	err = set_delegate_mask(fs_fd, "delegate_attachs", opts->attachs, opts->attachs_str);
	if (!ASSERT_OK(err, "fs_cfg_attachs"))
		return err;

	/* instantiate FS object */
	err = sys_fsconfig(fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
	if (err < 0)
		return -errno;

	return 0;
}

/* send FD over Unix domain (AF_UNIX) socket */
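/* The fd is transferred as SCM_RIGHTS ancillary data; a stream socket
 * needs at least one byte of regular payload to carry the ancillary
 * data, hence the dummy one-byte iovec.
 */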
static int sendfd(int sockfd, int fd)
{
	struct msghdr msg = {};
	struct cmsghdr *cmsg;
	int fds[1] = { fd }, err;
	char iobuf[1];
	struct iovec io = {
		.iov_base = iobuf,
		.iov_len = sizeof(iobuf),
	};
	union {
		char buf[CMSG_SPACE(sizeof(fds))];
		struct cmsghdr align;
	} u;

	msg.msg_iov = &io;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(fds));
	memcpy(CMSG_DATA(cmsg), fds, sizeof(fds));

	err = sendmsg(sockfd, &msg, 0);
	if (err < 0)
		err = -errno;
	if (!ASSERT_EQ(err, 1, "sendmsg"))
		return -EINVAL;

	return 0;
}

/* receive FD over Unix domain (AF_UNIX) socket */
static int recvfd(int sockfd, int *fd)
{
	struct msghdr msg = {};
	struct cmsghdr *cmsg;
	int fds[1], err;
	char iobuf[1];
	struct iovec io = {
		.iov_base = iobuf,
		.iov_len = sizeof(iobuf),
	};
	union {
		char buf[CMSG_SPACE(sizeof(fds))];
		struct cmsghdr align;
	} u;

	msg.msg_iov = &io;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);

	err = recvmsg(sockfd, &msg, 0);
	if (err < 0)
		err = -errno;
	if (!ASSERT_EQ(err, 1, "recvmsg"))
		return -EINVAL;

	cmsg = CMSG_FIRSTHDR(&msg);
	if (!ASSERT_OK_PTR(cmsg, "cmsg_null") ||
	    !ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(fds)), "cmsg_len") ||
	    !ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET, "cmsg_level") ||
	    !ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS, "cmsg_type"))
		return -EINVAL;

	memcpy(fds, CMSG_DATA(cmsg), sizeof(fds));
	*fd = fds[0];

	return 0;
}

static ssize_t write_nointr(int fd, const void *buf, size_t count)
{
	ssize_t ret;

	do {
		ret = write(fd, buf, count);
	} while (ret < 0 && errno == EINTR);

	return ret;
}

static int write_file(const char *path, const void *buf, size_t count)
{
	int fd;
	ssize_t ret;

	fd = open(path, O_WRONLY | O_CLOEXEC | O_NOCTTY | O_NOFOLLOW);
	if (fd < 0)
		return -1;

	ret = write_nointr(fd, buf, count);
	close(fd);
	if (ret < 0 || (size_t)ret != count)
		return -1;

	return 0;
}

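/* Unshare a new user namespace and map the caller's uid/gid to root
 * inside it, so that subsequent "privileged" operations are privileged
 * only within the namespace.
 */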
static int create_and_enter_userns(void)
{
	uid_t uid;
	gid_t gid;
	char map[100];

	uid = getuid();
	gid = getgid();

	if (unshare(CLONE_NEWUSER))
		return -1;

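	/* "deny" has to be written to setgroups before gid_map can be set
	 * by an unprivileged process; ENOENT just means we are on an old
	 * kernel that predates the setgroups file
	 */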
	if (write_file("/proc/self/setgroups", "deny", sizeof("deny") - 1) &&
	    errno != ENOENT)
		return -1;

	snprintf(map, sizeof(map), "0 %d 1", uid);
	if (write_file("/proc/self/uid_map", map, strlen(map)))
		return -1;

	snprintf(map, sizeof(map), "0 %d 1", gid);
	if (write_file("/proc/self/gid_map", map, strlen(map)))
		return -1;

	if (setgid(0))
		return -1;

	if (setuid(0))
		return -1;

	return 0;
}

typedef int (*child_callback_fn)(int bpffs_fd, struct token_lsm *lsm_skel);

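/* Child half of each subtest: load and attach an LSM "policy", enter
 * fresh user and mount namespaces, create an unconfigured BPF FS context
 * and ship it to the (still privileged) parent, which sets the
 * delegation options; then mount the FS, mint a BPF token from it, and
 * run the subtest callback.
 */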
static void child(int sock_fd, struct bpffs_opts *opts, child_callback_fn callback)
{
	int mnt_fd = -1, fs_fd = -1, err = 0, bpffs_fd = -1, token_fd = -1;
	struct token_lsm *lsm_skel = NULL;
	char one;

	/* load and attach LSM "policy" before we go into unpriv userns */
	lsm_skel = token_lsm__open_and_load();
	if (!ASSERT_OK_PTR(lsm_skel, "lsm_skel_load")) {
		err = -EINVAL;
		goto cleanup;
	}
	lsm_skel->bss->my_pid = getpid();
	err = token_lsm__attach(lsm_skel);
	if (!ASSERT_OK(err, "lsm_skel_attach"))
		goto cleanup;

	/* setup userns with root mappings */
	err = create_and_enter_userns();
	if (!ASSERT_OK(err, "create_and_enter_userns"))
		goto cleanup;

	/* setup mountns to allow creating BPF FS (fsopen("bpf")) from unpriv process */
	err = unshare(CLONE_NEWNS);
	if (!ASSERT_OK(err, "create_mountns"))
		goto cleanup;

	err = sys_mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);
315 	if (!ASSERT_OK(err, "remount_root"))
316 		goto cleanup;
317 
318 	fs_fd = create_bpffs_fd();
319 	if (!ASSERT_GE(fs_fd, 0, "create_bpffs_fd")) {
320 		err = -EINVAL;
321 		goto cleanup;
322 	}
323 
324 	/* ensure unprivileged child cannot set delegation options */
325 	err = set_delegate_mask(fs_fd, "delegate_cmds", 0x1, NULL);
326 	ASSERT_EQ(err, -EPERM, "delegate_cmd_eperm");
327 	err = set_delegate_mask(fs_fd, "delegate_maps", 0x1, NULL);
328 	ASSERT_EQ(err, -EPERM, "delegate_maps_eperm");
329 	err = set_delegate_mask(fs_fd, "delegate_progs", 0x1, NULL);
330 	ASSERT_EQ(err, -EPERM, "delegate_progs_eperm");
331 	err = set_delegate_mask(fs_fd, "delegate_attachs", 0x1, NULL);
332 	ASSERT_EQ(err, -EPERM, "delegate_attachs_eperm");
333 
334 	/* pass BPF FS context object to parent */
335 	err = sendfd(sock_fd, fs_fd);
336 	if (!ASSERT_OK(err, "send_fs_fd"))
337 		goto cleanup;
338 
	/* wait for the parent to read the fd, do the fsconfig() calls,
	 * and signal us that it is done
	 */
	err = read(sock_fd, &one, sizeof(one));
	if (!ASSERT_GE(err, 0, "read_one"))
		goto cleanup;

	/* avoid mucking around with mount namespaces and mounting at
	 * well-known path, just create O_PATH fd for detached mount
	 */
	mnt_fd = sys_fsmount(fs_fd, 0, 0);
	if (!ASSERT_OK_FD(mnt_fd, "mnt_fd"))
		goto cleanup;

	/* try to fspick() BPF FS and try to add some delegation options */
	fs_fd = sys_fspick(mnt_fd, "", FSPICK_EMPTY_PATH);
	if (!ASSERT_GE(fs_fd, 0, "bpffs_fspick")) {
		err = -EINVAL;
		goto cleanup;
	}

	/* ensure unprivileged child cannot reconfigure to set delegation options */
	err = set_delegate_mask(fs_fd, "delegate_cmds", 0, "any");
	if (!ASSERT_EQ(err, -EPERM, "delegate_cmd_eperm_reconfig")) {
		err = -EINVAL;
		goto cleanup;
	}
	err = set_delegate_mask(fs_fd, "delegate_maps", 0, "any");
	if (!ASSERT_EQ(err, -EPERM, "delegate_maps_eperm_reconfig")) {
		err = -EINVAL;
		goto cleanup;
	}
	err = set_delegate_mask(fs_fd, "delegate_progs", 0, "any");
	if (!ASSERT_EQ(err, -EPERM, "delegate_progs_eperm_reconfig")) {
		err = -EINVAL;
		goto cleanup;
	}
	err = set_delegate_mask(fs_fd, "delegate_attachs", 0, "any");
	if (!ASSERT_EQ(err, -EPERM, "delegate_attachs_eperm_reconfig")) {
		err = -EINVAL;
		goto cleanup;
	}
	zclose(fs_fd);

	bpffs_fd = openat(mnt_fd, ".", O_RDONLY);
	if (!ASSERT_GE(bpffs_fd, 0, "bpffs_open")) {
		err = -EINVAL;
		goto cleanup;
	}

	/* create BPF token FD and pass it to parent for some extra checks */
	token_fd = bpf_token_create(bpffs_fd, NULL);
	if (!ASSERT_GT(token_fd, 0, "child_token_create")) {
		err = -EINVAL;
		goto cleanup;
	}
	err = sendfd(sock_fd, token_fd);
	if (!ASSERT_OK(err, "send_token_fd"))
		goto cleanup;
	zclose(token_fd);

	/* run custom test logic against the specially set up BPF FS instance */
	err = callback(bpffs_fd, lsm_skel);
	if (!ASSERT_OK(err, "test_callback"))
		goto cleanup;

	err = 0;
cleanup:
	zclose(sock_fd);
	zclose(mnt_fd);
	zclose(fs_fd);
	zclose(bpffs_fd);
	zclose(token_fd);

	if (lsm_skel)
		lsm_skel->bss->my_pid = 0;
	token_lsm__destroy(lsm_skel);

	exit(-err);
}

static int wait_for_pid(pid_t pid)
{
	int status, ret;

again:
	ret = waitpid(pid, &status, 0);
	if (ret == -1) {
		if (errno == EINTR)
			goto again;

		return -1;
	}

	if (!WIFEXITED(status))
		return -1;

	return WEXITSTATUS(status);
}

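/* Parent half: receives the child's BPF FS context fd, applies the
 * delegation mount options (this requires privileges the child gave up
 * by entering the userns), then lets the child proceed.
 */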
static void parent(int child_pid, struct bpffs_opts *bpffs_opts, int sock_fd)
{
	int fs_fd = -1, token_fd = -1, err;
	char one = 1;

	err = recvfd(sock_fd, &fs_fd);
	if (!ASSERT_OK(err, "recv_bpffs_fd"))
		goto cleanup;

	err = materialize_bpffs_fd(fs_fd, bpffs_opts);
	if (!ASSERT_GE(err, 0, "materialize_bpffs_fd")) {
		err = -EINVAL;
		goto cleanup;
	}

	/* notify the child that we did the fsconfig() calls and it can proceed. */
	err = write(sock_fd, &one, sizeof(one));
	if (!ASSERT_EQ(err, sizeof(one), "send_one"))
		goto cleanup;
	zclose(fs_fd);

	/* receive BPF token FD back from child for some extra tests */
	err = recvfd(sock_fd, &token_fd);
	if (!ASSERT_OK(err, "recv_token_fd"))
		goto cleanup;

	err = wait_for_pid(child_pid);
	ASSERT_OK(err, "waitpid_child");

cleanup:
	zclose(sock_fd);
	zclose(fs_fd);
	zclose(token_fd);

	if (child_pid > 0)
		(void)kill(child_pid, SIGKILL);
}

static void subtest_userns(struct bpffs_opts *bpffs_opts,
			   child_callback_fn child_cb)
{
	int sock_fds[2] = { -1, -1 };
	int child_pid = 0, err;

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, sock_fds);
	if (!ASSERT_OK(err, "socketpair"))
		goto cleanup;

	child_pid = fork();
	if (!ASSERT_GE(child_pid, 0, "fork"))
		goto cleanup;

	if (child_pid == 0) {
		zclose(sock_fds[0]);
		child(sock_fds[1], bpffs_opts, child_cb);
		return; /* never reached, child() always exit()s */
	} else {
		zclose(sock_fds[1]);
		parent(child_pid, bpffs_opts, sock_fds[0]);
		return;
	}

cleanup:
	zclose(sock_fds[0]);
	zclose(sock_fds[1]);
	if (child_pid > 0)
		(void)kill(child_pid, SIGKILL);
}

static int userns_map_create(int mnt_fd, struct token_lsm *lsm_skel)
{
	LIBBPF_OPTS(bpf_map_create_opts, map_opts);
	int err, token_fd = -1, map_fd = -1;
	__u64 old_caps = 0;

	/* create BPF token from BPF FS mount */
	token_fd = bpf_token_create(mnt_fd, NULL);
	if (!ASSERT_GT(token_fd, 0, "token_create")) {
		err = -EINVAL;
		goto cleanup;
	}

	/* while inside non-init userns, we need both a BPF token *and*
	 * CAP_BPF inside the current userns to create a privileged map;
	 * let's test that neither a BPF token alone nor namespaced CAP_BPF
	 * is sufficient
	 */
	err = drop_priv_caps(&old_caps);
	if (!ASSERT_OK(err, "drop_caps"))
		goto cleanup;

	/* no token, no CAP_BPF -> fail */
	map_opts.map_flags = 0;
	map_opts.token_fd = 0;
	map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_wo_bpf", 0, 8, 1, &map_opts);
	if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_wo_cap_bpf_should_fail")) {
		err = -EINVAL;
		goto cleanup;
	}

	/* token without CAP_BPF -> fail */
	map_opts.map_flags = BPF_F_TOKEN_FD;
	map_opts.token_fd = token_fd;
	map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_wo_bpf", 0, 8, 1, &map_opts);
	if (!ASSERT_LT(map_fd, 0, "stack_map_w_token_wo_cap_bpf_should_fail")) {
		err = -EINVAL;
		goto cleanup;
	}

	/* get back effective local CAP_BPF (and CAP_SYS_ADMIN) */
	err = restore_priv_caps(old_caps);
	if (!ASSERT_OK(err, "restore_caps"))
		goto cleanup;

	/* CAP_BPF without token -> fail */
	map_opts.map_flags = 0;
	map_opts.token_fd = 0;
	map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_w_bpf", 0, 8, 1, &map_opts);
	if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_w_cap_bpf_should_fail")) {
		err = -EINVAL;
		goto cleanup;
	}

	/* finally, namespaced CAP_BPF + token -> success */
	map_opts.map_flags = BPF_F_TOKEN_FD;
	map_opts.token_fd = token_fd;
	map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_w_bpf", 0, 8, 1, &map_opts);
	if (!ASSERT_GT(map_fd, 0, "stack_map_w_token_w_cap_bpf")) {
		err = -EINVAL;
		goto cleanup;
	}

cleanup:
	zclose(token_fd);
	zclose(map_fd);
	return err;
}

static int userns_btf_load(int mnt_fd, struct token_lsm *lsm_skel)
{
	LIBBPF_OPTS(bpf_btf_load_opts, btf_opts);
	int err, token_fd = -1, btf_fd = -1;
	const void *raw_btf_data;
	struct btf *btf = NULL;
	__u32 raw_btf_size;
	__u64 old_caps = 0;

	/* create BPF token from BPF FS mount */
	token_fd = bpf_token_create(mnt_fd, NULL);
	if (!ASSERT_GT(token_fd, 0, "token_create")) {
		err = -EINVAL;
		goto cleanup;
	}

	/* while inside non-init userns, we need both a BPF token *and*
	 * CAP_BPF inside the current userns to load BTF into the kernel;
	 * let's test that neither a BPF token alone nor namespaced CAP_BPF
	 * is sufficient
	 */
	err = drop_priv_caps(&old_caps);
	if (!ASSERT_OK(err, "drop_caps"))
		goto cleanup;

	/* set up trivial BTF data to load into the kernel */
	btf = btf__new_empty();
	if (!ASSERT_OK_PTR(btf, "empty_btf"))
		goto cleanup;

	ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type");

	raw_btf_data = btf__raw_data(btf, &raw_btf_size);
	if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data"))
		goto cleanup;

	/* no token + no CAP_BPF -> failure */
	btf_opts.btf_flags = 0;
	btf_opts.token_fd = 0;
	btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts);
	if (!ASSERT_LT(btf_fd, 0, "no_token_no_cap_should_fail"))
		goto cleanup;

	/* token + no CAP_BPF -> failure */
	btf_opts.btf_flags = BPF_F_TOKEN_FD;
	btf_opts.token_fd = token_fd;
	btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts);
	if (!ASSERT_LT(btf_fd, 0, "token_no_cap_should_fail"))
		goto cleanup;

	/* get back effective local CAP_BPF (and CAP_SYS_ADMIN) */
	err = restore_priv_caps(old_caps);
	if (!ASSERT_OK(err, "restore_caps"))
		goto cleanup;

	/* token + CAP_BPF -> success */
	btf_opts.btf_flags = BPF_F_TOKEN_FD;
	btf_opts.token_fd = token_fd;
	btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts);
	if (!ASSERT_GT(btf_fd, 0, "token_and_cap_success"))
		goto cleanup;

	err = 0;
cleanup:
	btf__free(btf);
	zclose(btf_fd);
	zclose(token_fd);
	return err;
}

static int userns_prog_load(int mnt_fd, struct token_lsm *lsm_skel)
{
	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts);
	int err, token_fd = -1, prog_fd = -1;
	struct bpf_insn insns[] = {
		/* bpf_jiffies64() requires CAP_BPF */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
		/* bpf_get_current_task() requires CAP_PERFMON */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_current_task),
		/* r0 = 0; exit; */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	size_t insn_cnt = ARRAY_SIZE(insns);
	__u64 old_caps = 0;

	/* create BPF token from BPF FS mount */
	token_fd = bpf_token_create(mnt_fd, NULL);
	if (!ASSERT_GT(token_fd, 0, "token_create")) {
		err = -EINVAL;
		goto cleanup;
	}

	/* validate that we can successfully load a BPF program with a token;
	 * this being an XDP program (CAP_NET_ADMIN) using the bpf_jiffies64()
	 * (CAP_BPF) and bpf_get_current_task() (CAP_PERFMON) helpers
	 * validates that the BPF token is wired up properly in a bunch of
	 * places in the kernel
	 */
	prog_opts.prog_flags = BPF_F_TOKEN_FD;
	prog_opts.token_fd = token_fd;
	prog_opts.expected_attach_type = BPF_XDP;
	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL",
				insns, insn_cnt, &prog_opts);
	if (!ASSERT_GT(prog_fd, 0, "prog_fd")) {
		err = -EPERM;
		goto cleanup;
	}

	/* no token + caps -> failure */
	prog_opts.prog_flags = 0;
	prog_opts.token_fd = 0;
	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL",
				insns, insn_cnt, &prog_opts);
	if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm_wo_token_w_caps")) {
		err = -EPERM;
		goto cleanup;
	}

	err = drop_priv_caps(&old_caps);
	if (!ASSERT_OK(err, "drop_caps"))
		goto cleanup;

	/* no caps + token -> failure */
	prog_opts.prog_flags = BPF_F_TOKEN_FD;
	prog_opts.token_fd = token_fd;
	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL",
				insns, insn_cnt, &prog_opts);
	if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm_w_token_wo_caps")) {
		err = -EPERM;
		goto cleanup;
	}

	/* no caps + no token -> definitely a failure */
	prog_opts.prog_flags = 0;
	prog_opts.token_fd = 0;
	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL",
				insns, insn_cnt, &prog_opts);
	if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm_wo_token_wo_caps")) {
		err = -EPERM;
		goto cleanup;
	}

	err = 0;
cleanup:
	zclose(prog_fd);
	zclose(token_fd);
	return err;
}

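/* priv_map uses a privileged map type (a queue map, per the
 * delegate_maps mask in serial_test_token), so loading the skeleton
 * without a token must fail; setting bpf_token_path lets libbpf create
 * a token from the delegated BPF FS and load successfully.
 */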
static int userns_obj_priv_map(int mnt_fd, struct token_lsm *lsm_skel)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	char buf[256];
	struct priv_map *skel;
	int err;

	skel = priv_map__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
		priv_map__destroy(skel);
		return -EINVAL;
	}

	/* use bpf_token_path to provide BPF FS path */
	snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
	opts.bpf_token_path = buf;
	skel = priv_map__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
		return -EINVAL;

	err = priv_map__load(skel);
	priv_map__destroy(skel);
	if (!ASSERT_OK(err, "obj_token_path_load"))
		return -EINVAL;

	return 0;
}

static int userns_obj_priv_prog(int mnt_fd, struct token_lsm *lsm_skel)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	char buf[256];
	struct priv_prog *skel;
	int err;

	skel = priv_prog__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
		priv_prog__destroy(skel);
		return -EINVAL;
	}

	/* use bpf_token_path to provide BPF FS path */
	snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
	opts.bpf_token_path = buf;
	skel = priv_prog__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
		return -EINVAL;
	err = priv_prog__load(skel);
	priv_prog__destroy(skel);
	if (!ASSERT_OK(err, "obj_token_path_load"))
		return -EINVAL;

	/* provide BPF token, but reject bpf_token_capable() with LSM */
	lsm_skel->bss->reject_capable = true;
	lsm_skel->bss->reject_cmd = false;
	skel = priv_prog__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "obj_token_lsm_reject_cap_open"))
		return -EINVAL;
	err = priv_prog__load(skel);
	priv_prog__destroy(skel);
	if (!ASSERT_ERR(err, "obj_token_lsm_reject_cap_load"))
		return -EINVAL;

	/* provide BPF token, but reject bpf_token_cmd() with LSM */
	lsm_skel->bss->reject_capable = false;
	lsm_skel->bss->reject_cmd = true;
	skel = priv_prog__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "obj_token_lsm_reject_cmd_open"))
		return -EINVAL;
	err = priv_prog__load(skel);
	priv_prog__destroy(skel);
	if (!ASSERT_ERR(err, "obj_token_lsm_reject_cmd_load"))
		return -EINVAL;

	return 0;
}

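/* Common setup for the freplace subtests: load the XDP target program
 * and open (but don't yet load) the freplace skeleton, both with
 * bpf_token_path set; hand back the target program's fd for
 * bpf_program__set_attach_target().
 */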
static int userns_obj_priv_freplace_setup(int mnt_fd, struct priv_freplace_prog **fr_skel,
					  struct priv_prog **skel, int *tgt_fd)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	int err;
	char buf[256];

	/* use bpf_token_path to provide BPF FS path */
	snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
	opts.bpf_token_path = buf;
	*skel = priv_prog__open_opts(&opts);
	if (!ASSERT_OK_PTR(*skel, "priv_prog__open_opts"))
		return -EINVAL;
	err = priv_prog__load(*skel);
	if (!ASSERT_OK(err, "priv_prog__load"))
		return -EINVAL;

	*fr_skel = priv_freplace_prog__open_opts(&opts);
	if (!ASSERT_OK_PTR(*fr_skel, "priv_freplace_prog__open_opts"))
		return -EINVAL;

	*tgt_fd = bpf_program__fd((*skel)->progs.xdp_prog1);
	return 0;
}

/* Verify that freplace works from a user namespace, because the BPF
 * token is created in bpf_object__prepare()
 */
static int userns_obj_priv_freplace_prog(int mnt_fd, struct token_lsm *lsm_skel)
{
	struct priv_freplace_prog *fr_skel = NULL;
	struct priv_prog *skel = NULL;
	int err, tgt_fd;

	err = userns_obj_priv_freplace_setup(mnt_fd, &fr_skel, &skel, &tgt_fd);
	if (!ASSERT_OK(err, "setup"))
		goto out;

	err = bpf_object__prepare(fr_skel->obj);
	if (!ASSERT_OK(err, "freplace__prepare"))
		goto out;

	err = bpf_program__set_attach_target(fr_skel->progs.new_xdp_prog2, tgt_fd, "xdp_prog1");
	if (!ASSERT_OK(err, "set_attach_target"))
		goto out;

	err = priv_freplace_prog__load(fr_skel);
	ASSERT_OK(err, "priv_freplace_prog__load");

out:
	priv_freplace_prog__destroy(fr_skel);
	priv_prog__destroy(skel);
	return err;
}

/* Verify that freplace fails to set the attach target from a user namespace without a BPF token */
static int userns_obj_priv_freplace_prog_fail(int mnt_fd, struct token_lsm *lsm_skel)
{
	struct priv_freplace_prog *fr_skel = NULL;
	struct priv_prog *skel = NULL;
	int err, tgt_fd;

	err = userns_obj_priv_freplace_setup(mnt_fd, &fr_skel, &skel, &tgt_fd);
	if (!ASSERT_OK(err, "setup"))
		goto out;

	err = bpf_program__set_attach_target(fr_skel->progs.new_xdp_prog2, tgt_fd, "xdp_prog1");
	if (ASSERT_ERR(err, "attach fails"))
		err = 0;
	else
		err = -EINVAL;

out:
	priv_freplace_prog__destroy(fr_skel);
	priv_prog__destroy(skel);
	return err;
}

/* this test is called with BPF FS that doesn't delegate BPF_BTF_LOAD command,
 * which should cause struct_ops application to fail, as BTF won't be uploaded
 * into the kernel, even if STRUCT_OPS programs themselves are allowed
 */
static int validate_struct_ops_load(int mnt_fd, bool expect_success)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	char buf[256];
	struct dummy_st_ops_success *skel;
	int err;

	snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
	opts.bpf_token_path = buf;
	skel = dummy_st_ops_success__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
		return -EINVAL;

	err = dummy_st_ops_success__load(skel);
	dummy_st_ops_success__destroy(skel);
	if (expect_success) {
		if (!ASSERT_OK(err, "obj_token_path_load"))
			return -EINVAL;
	} else /* expect failure */ {
		if (!ASSERT_ERR(err, "obj_token_path_load"))
			return -EINVAL;
	}

	return 0;
}

static int userns_obj_priv_btf_fail(int mnt_fd, struct token_lsm *lsm_skel)
{
	return validate_struct_ops_load(mnt_fd, false /* should fail */);
}

static int userns_obj_priv_btf_success(int mnt_fd, struct token_lsm *lsm_skel)
{
	return validate_struct_ops_load(mnt_fd, true /* should succeed */);
}

static const char *token_bpffs_custom_dir(void)
{
	return getenv("BPF_SELFTESTS_BPF_TOKEN_DIR") ?: "/tmp/bpf-token-fs";
}

#define TOKEN_ENVVAR "LIBBPF_BPF_TOKEN_PATH"

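/* libbpf can also create a BPF token implicitly, either from the default
 * /sys/fs/bpf mount point or from the path given in the
 * LIBBPF_BPF_TOKEN_PATH envvar; the next two subtests exercise both.
 */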
static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	struct dummy_st_ops_success *skel;
	int err;

	/* before we mount BPF FS with token delegation, struct_ops skeleton
	 * should fail to load
	 */
	skel = dummy_st_ops_success__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
		dummy_st_ops_success__destroy(skel);
		return -EINVAL;
	}

	/* mount custom BPF FS over /sys/fs/bpf so that libbpf can create BPF
	 * token automatically and implicitly
	 */
	err = sys_move_mount(mnt_fd, "", AT_FDCWD, "/sys/fs/bpf", MOVE_MOUNT_F_EMPTY_PATH);
	if (!ASSERT_OK(err, "move_mount_bpffs"))
		return -EINVAL;

	/* disable implicit BPF token creation by setting
	 * LIBBPF_BPF_TOKEN_PATH envvar to empty value, load should fail
	 */
	err = setenv(TOKEN_ENVVAR, "", 1 /*overwrite*/);
	if (!ASSERT_OK(err, "setenv_token_path"))
		return -EINVAL;
	skel = dummy_st_ops_success__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "obj_token_envvar_disabled_load")) {
		unsetenv(TOKEN_ENVVAR);
		dummy_st_ops_success__destroy(skel);
		return -EINVAL;
	}
	unsetenv(TOKEN_ENVVAR);

	/* now the same struct_ops skeleton should succeed thanks to libbpf
	 * creating BPF token from /sys/fs/bpf mount point
	 */
	skel = dummy_st_ops_success__open_and_load();
	if (!ASSERT_OK_PTR(skel, "obj_implicit_token_load"))
		return -EINVAL;

	dummy_st_ops_success__destroy(skel);

	/* now disable implicit token through empty bpf_token_path, should fail */
	opts.bpf_token_path = "";
	skel = dummy_st_ops_success__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "obj_empty_token_path_open"))
		return -EINVAL;

	err = dummy_st_ops_success__load(skel);
	dummy_st_ops_success__destroy(skel);
	if (!ASSERT_ERR(err, "obj_empty_token_path_load"))
		return -EINVAL;

	return 0;
}

static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *lsm_skel)
{
	const char *custom_dir = token_bpffs_custom_dir();
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	struct dummy_st_ops_success *skel;
	int err;

	/* before we mount BPF FS with token delegation, struct_ops skeleton
	 * should fail to load
	 */
	skel = dummy_st_ops_success__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
		dummy_st_ops_success__destroy(skel);
		return -EINVAL;
	}

	/* mount custom BPF FS over custom location, so libbpf can't create
	 * BPF token implicitly, unless pointed to it through
	 * LIBBPF_BPF_TOKEN_PATH envvar
	 */
	rmdir(custom_dir);
	if (!ASSERT_OK(mkdir(custom_dir, 0777), "mkdir_bpffs_custom"))
		goto err_out;
	err = sys_move_mount(mnt_fd, "", AT_FDCWD, custom_dir, MOVE_MOUNT_F_EMPTY_PATH);
	if (!ASSERT_OK(err, "move_mount_bpffs"))
		goto err_out;

	/* even though we have BPF FS with delegation, it's not at default
	 * /sys/fs/bpf location, so we still fail to load until envvar is set up
	 */
	skel = dummy_st_ops_success__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load2")) {
		dummy_st_ops_success__destroy(skel);
		goto err_out;
	}

	err = setenv(TOKEN_ENVVAR, custom_dir, 1 /*overwrite*/);
	if (!ASSERT_OK(err, "setenv_token_path"))
		goto err_out;

	/* now the same struct_ops skeleton should succeed thanks to libbpf
	 * creating BPF token from custom mount point
	 */
	skel = dummy_st_ops_success__open_and_load();
	if (!ASSERT_OK_PTR(skel, "obj_implicit_token_load"))
		goto err_out;

	dummy_st_ops_success__destroy(skel);

	/* now disable implicit token through empty bpf_token_path, envvar
	 * will be ignored, should fail
	 */
	opts.bpf_token_path = "";
	skel = dummy_st_ops_success__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "obj_empty_token_path_open"))
		goto err_out;

	err = dummy_st_ops_success__load(skel);
	dummy_st_ops_success__destroy(skel);
	if (!ASSERT_ERR(err, "obj_empty_token_path_load"))
		goto err_out;

	rmdir(custom_dir);
	unsetenv(TOKEN_ENVVAR);
	return 0;
err_out:
	rmdir(custom_dir);
	unsetenv(TOKEN_ENVVAR);
	return -EINVAL;
}

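/* JITed BPF programs show up in kallsyms as bpf_prog_<tag>_<name>;
 * match only on the <name> part, since the tag hash isn't stable
 */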
static bool kallsyms_has_bpf_func(struct ksyms *ksyms, const char *func_name)
{
	char name[256];
	int i;

	for (i = 0; i < ksyms->sym_cnt; i++) {
		if (sscanf(ksyms->syms[i].name, "bpf_prog_%*[^_]_%255s", name) == 1 &&
		    strcmp(name, func_name) == 0)
			return true;
	}
	return false;
}

static int userns_obj_priv_prog_kallsyms(int mnt_fd, struct token_lsm *lsm_skel)
{
	const char *func_names[] = { "xdp_main", "token_ksym_subprog" };
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	struct token_kallsyms *skel;
	struct ksyms *ksyms = NULL;
	char buf[256];
	int i, err;

	snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
	opts.bpf_token_path = buf;
	skel = token_kallsyms__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "token_kallsyms__open_opts"))
		return -EINVAL;

	err = token_kallsyms__load(skel);
	if (!ASSERT_OK(err, "token_kallsyms__load"))
		goto cleanup;

	ksyms = load_kallsyms_local();
	if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local")) {
		err = -EINVAL;
		goto cleanup;
	}

	for (i = 0; i < ARRAY_SIZE(func_names); i++) {
		if (!ASSERT_TRUE(kallsyms_has_bpf_func(ksyms, func_names[i]),
				 func_names[i])) {
			err = -EINVAL;
			break;
		}
	}

cleanup:
	free_kallsyms_local(ksyms);
	token_kallsyms__destroy(skel);
	return err;
}

#define bit(n) (1ULL << (n))

static int userns_bpf_token_info(int mnt_fd, struct token_lsm *lsm_skel)
{
	int err, token_fd = -1;
	struct bpf_token_info info;
	__u32 len = sizeof(struct bpf_token_info);

	/* create BPF token from BPF FS mount */
	token_fd = bpf_token_create(mnt_fd, NULL);
	if (!ASSERT_GT(token_fd, 0, "token_create")) {
		err = -EINVAL;
		goto cleanup;
	}

	memset(&info, 0, len);
	err = bpf_obj_get_info_by_fd(token_fd, &info, &len);
	if (!ASSERT_OK(err, "bpf_obj_get_token_info"))
		goto cleanup;
	if (!ASSERT_EQ(info.allowed_cmds, bit(BPF_MAP_CREATE), "token_info_cmds_map_create")) {
		err = -EINVAL;
		goto cleanup;
	}
	if (!ASSERT_EQ(info.allowed_progs, bit(BPF_PROG_TYPE_XDP), "token_info_progs_xdp")) {
		err = -EINVAL;
		goto cleanup;
	}

	/* BPF_PROG_TYPE_EXT was not delegated, so it must not be set in the token */
	if (!ASSERT_NEQ(info.allowed_progs, bit(BPF_PROG_TYPE_EXT), "token_info_progs_ext"))
		err = -EINVAL;

cleanup:
	zclose(token_fd);
	return err;
}

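/* the subtests mount over /sys/fs/bpf and tweak global sysctls, so the
 * whole test must not run in parallel with others (hence the serial_
 * prefix)
 */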
void serial_test_token(void)
{
	if (test__start_subtest("map_token")) {
		struct bpffs_opts opts = {
			.cmds_str = "map_create",
			.maps_str = "stack",
		};

		subtest_userns(&opts, userns_map_create);
	}
	if (test__start_subtest("btf_token")) {
		struct bpffs_opts opts = {
			.cmds = 1ULL << BPF_BTF_LOAD,
		};

		subtest_userns(&opts, userns_btf_load);
	}
	if (test__start_subtest("prog_token")) {
		struct bpffs_opts opts = {
			.cmds_str = "PROG_LOAD",
			.progs_str = "XDP",
			.attachs_str = "xdp",
		};

		subtest_userns(&opts, userns_prog_load);
	}
	if (test__start_subtest("obj_priv_map")) {
		struct bpffs_opts opts = {
			.cmds = bit(BPF_MAP_CREATE),
			.maps = bit(BPF_MAP_TYPE_QUEUE),
		};

		subtest_userns(&opts, userns_obj_priv_map);
	}
	if (test__start_subtest("obj_priv_prog")) {
		struct bpffs_opts opts = {
			.cmds = bit(BPF_PROG_LOAD),
			.progs = bit(BPF_PROG_TYPE_XDP),
			.attachs = ~0ULL,
		};

		subtest_userns(&opts, userns_obj_priv_prog);
	}
	if (test__start_subtest("obj_priv_freplace_prog")) {
		struct bpffs_opts opts = {
			.cmds = bit(BPF_BTF_LOAD) | bit(BPF_PROG_LOAD) | bit(BPF_BTF_GET_FD_BY_ID),
			.progs = bit(BPF_PROG_TYPE_EXT) | bit(BPF_PROG_TYPE_XDP),
			.attachs = ~0ULL,
		};
		subtest_userns(&opts, userns_obj_priv_freplace_prog);
	}
	if (test__start_subtest("obj_priv_freplace_prog_fail")) {
		struct bpffs_opts opts = {
			.cmds = bit(BPF_BTF_LOAD) | bit(BPF_PROG_LOAD) | bit(BPF_BTF_GET_FD_BY_ID),
			.progs = bit(BPF_PROG_TYPE_EXT) | bit(BPF_PROG_TYPE_XDP),
			.attachs = ~0ULL,
		};
		subtest_userns(&opts, userns_obj_priv_freplace_prog_fail);
	}
	if (test__start_subtest("obj_priv_btf_fail")) {
		struct bpffs_opts opts = {
			/* disallow BTF loading */
			.cmds = bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
			.maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
			.progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
			.attachs = ~0ULL,
		};

		subtest_userns(&opts, userns_obj_priv_btf_fail);
	}
	if (test__start_subtest("obj_priv_btf_success")) {
		struct bpffs_opts opts = {
			/* allow BTF loading */
			.cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
			.maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
			.progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
			.attachs = ~0ULL,
		};

		subtest_userns(&opts, userns_obj_priv_btf_success);
	}
	if (test__start_subtest("obj_priv_implicit_token")) {
		struct bpffs_opts opts = {
			/* allow BTF loading */
			.cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
			.maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
			.progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
			.attachs = ~0ULL,
		};

		subtest_userns(&opts, userns_obj_priv_implicit_token);
	}
	if (test__start_subtest("obj_priv_implicit_token_envvar")) {
		struct bpffs_opts opts = {
			/* allow BTF loading */
			.cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
			.maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
			.progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
			.attachs = ~0ULL,
		};

		subtest_userns(&opts, userns_obj_priv_implicit_token_envvar);
	}
	if (test__start_subtest("bpf_token_info")) {
		struct bpffs_opts opts = {
			.cmds = bit(BPF_MAP_CREATE),
			.progs = bit(BPF_PROG_TYPE_XDP),
			.attachs = ~0ULL,
		};

		subtest_userns(&opts, userns_bpf_token_info);
	}
	if (test__start_subtest("obj_priv_prog_kallsyms")) {
		char perf_paranoid_orig[32] = {};
		char kptr_restrict_orig[32] = {};
		struct bpffs_opts opts = {
			.cmds = bit(BPF_BTF_LOAD) | bit(BPF_PROG_LOAD),
			.progs = bit(BPF_PROG_TYPE_XDP),
			.attachs = ~0ULL,
		};

		if (sysctl_set_or_fail("/proc/sys/kernel/perf_event_paranoid", perf_paranoid_orig, "0"))
			goto cleanup;
		if (sysctl_set_or_fail("/proc/sys/kernel/kptr_restrict", kptr_restrict_orig, "0"))
			goto cleanup;

		subtest_userns(&opts, userns_obj_priv_prog_kallsyms);

cleanup:
		if (perf_paranoid_orig[0])
			sysctl_set_or_fail("/proc/sys/kernel/perf_event_paranoid", NULL, perf_paranoid_orig);
		if (kptr_restrict_orig[0])
			sysctl_set_or_fail("/proc/sys/kernel/kptr_restrict", NULL, kptr_restrict_orig);
	}
}