// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022, Oracle and/or its affiliates. */

#include <test_progs.h>
#include <bpf/btf.h>

#include "test_unpriv_bpf_disabled.skel.h"

#include "cap_helpers.h"
#include "bpf_util.h"

/* Using CAP_LAST_CAP is risky here, since it can get pulled in from
 * an old /usr/include/linux/capability.h and be < CAP_BPF; as a result
 * CAP_BPF would not be included in ALL_CAPS.  Instead use CAP_BPF as
 * we know its value is correct since it is explicitly defined in
 * cap_helpers.h.
 */
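/* ALL_CAPS: every capability bit from 0 up to and including CAP_BPF. */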
#define ALL_CAPS	((2ULL << CAP_BPF) - 1)

#define PINPATH		"/sys/fs/bpf/unpriv_bpf_disabled_"
#define NUM_MAPS	7

static __u32 got_perfbuf_val;
static __u32 got_ringbuf_val;

static int process_ringbuf(void *ctx, void *data, size_t len)
{
	if (ASSERT_EQ(len, sizeof(__u32), "ringbuf_size_valid"))
		got_ringbuf_val = *(__u32 *)data;
	return 0;
}

static void process_perfbuf(void *ctx, int cpu, void *data, __u32 len)
{
	if (ASSERT_EQ(len, sizeof(__u32), "perfbuf_size_valid"))
		got_perfbuf_val = *(__u32 *)data;
}

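/* Read the current value of sysctl_path into old_val (if non-NULL) and
 * write new_val unless it already matches.  old_val must be large enough
 * to hold the current value.  Returns 0 on success, -ENOENT or a negative
 * errno on failure.
 */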
static int sysctl_set(const char *sysctl_path, char *old_val, const char *new_val)
{
	int ret = 0;
	FILE *fp;

	fp = fopen(sysctl_path, "r+");
	if (!fp)
		return -errno;
	if (old_val && fscanf(fp, "%s", old_val) <= 0) {
		ret = -ENOENT;
	} else if (!old_val || strcmp(old_val, new_val) != 0) {
		fseek(fp, 0, SEEK_SET);
		if (fprintf(fp, "%s", new_val) < 0)
			ret = -errno;
	}
	fclose(fp);

	return ret;
}

static void test_unpriv_bpf_disabled_positive(struct test_unpriv_bpf_disabled *skel,
					      __u32 prog_id, int prog_fd, int perf_fd,
					      char **map_paths, int *map_fds)
{
	struct perf_buffer *perfbuf = NULL;
	struct ring_buffer *ringbuf = NULL;
	int i, nr_cpus, link_fd = -1;

	nr_cpus = bpf_num_possible_cpus();

	skel->bss->perfbuf_val = 1;
	skel->bss->ringbuf_val = 2;

	/* Positive tests for unprivileged BPF disabled. Verify we can
	 * - retrieve and interact with pinned maps;
	 * - set up and interact with perf buffer;
	 * - set up and interact with ring buffer;
	 * - create a link
	 */
	perfbuf = perf_buffer__new(bpf_map__fd(skel->maps.perfbuf), 8, process_perfbuf, NULL, NULL,
				   NULL);
	if (!ASSERT_OK_PTR(perfbuf, "perf_buffer__new"))
		goto cleanup;

	ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf), process_ringbuf, NULL, NULL);
	if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
		goto cleanup;

	/* usleep() triggers the skeleton's nanosleep tracepoint prog, which
	 * emits the perf event and ringbuf output we validate below.
	 */
	usleep(1);

	ASSERT_GT(perf_buffer__poll(perfbuf, 100), -1, "perf_buffer__poll");
	ASSERT_EQ(got_perfbuf_val, skel->bss->perfbuf_val, "check_perfbuf_val");
	ASSERT_EQ(ring_buffer__consume(ringbuf), 1, "ring_buffer__consume");
	ASSERT_EQ(got_ringbuf_val, skel->bss->ringbuf_val, "check_ringbuf_val");

	for (i = 0; i < NUM_MAPS; i++) {
		map_fds[i] = bpf_obj_get(map_paths[i]);
		if (!ASSERT_GT(map_fds[i], -1, "obj_get"))
			goto cleanup;
	}

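	/* For each pinned map, check that unprivileged update and lookup
	 * succeed, plus delete for the hash maps; per-CPU maps take one
	 * value per possible CPU.
	 */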
	for (i = 0; i < NUM_MAPS; i++) {
		bool prog_array = strstr(map_paths[i], "prog_array") != NULL;
		bool array = strstr(map_paths[i], "array") != NULL;
		bool buf = strstr(map_paths[i], "buf") != NULL;
		__u32 key = 0, vals[nr_cpus], lookup_vals[nr_cpus];
		__u32 expected_val = 1;
		int j;

		/* skip ringbuf, perfbuf */
		if (buf)
			continue;

		for (j = 0; j < nr_cpus; j++)
			vals[j] = expected_val;

		if (prog_array) {
			/* need valid prog array value */
			vals[0] = prog_fd;
			/* prog array lookup returns prog id, not fd */
			expected_val = prog_id;
		}
		ASSERT_OK(bpf_map_update_elem(map_fds[i], &key, vals, 0), "map_update_elem");
		ASSERT_OK(bpf_map_lookup_elem(map_fds[i], &key, &lookup_vals), "map_lookup_elem");
		ASSERT_EQ(lookup_vals[0], expected_val, "map_lookup_elem_values");
		if (!array)
			ASSERT_OK(bpf_map_delete_elem(map_fds[i], &key), "map_delete_elem");
	}

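	/* Finally, verify a link can still be created over the perf event. */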
	link_fd = bpf_link_create(bpf_program__fd(skel->progs.handle_perf_event), perf_fd,
				  BPF_PERF_EVENT, NULL);
	ASSERT_GT(link_fd, 0, "link_create");

cleanup:
	if (link_fd >= 0)
		close(link_fd);
	if (perfbuf)
		perf_buffer__free(perfbuf);
	if (ringbuf)
		ring_buffer__free(ringbuf);
}

static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *skel,
					      __u32 prog_id, int prog_fd, int perf_fd,
					      char **map_paths, int *map_fds)
{
	const struct bpf_insn prog_insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	const size_t prog_insn_cnt = ARRAY_SIZE(prog_insns);
	LIBBPF_OPTS(bpf_prog_load_opts, load_opts);
	struct bpf_map_info map_info = {};
	__u32 map_info_len = sizeof(map_info);
	struct bpf_link_info link_info = {};
	__u32 link_info_len = sizeof(link_info);
	struct btf *btf = NULL;
	__u32 attach_flags = 0;
	__u32 prog_ids[3] = {};
	__u32 prog_cnt = 3;
	__u32 next;
	int i;

	/* Negative tests for unprivileged BPF disabled.  Verify we cannot
	 * - load BPF programs;
	 * - create BPF maps;
	 * - get a prog/map/link fd by id;
	 * - get the next prog/map/link id;
	 * - query progs;
	 * - load BTF
	 */
	ASSERT_EQ(bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "simple_prog", "GPL",
				prog_insns, prog_insn_cnt, &load_opts),
		  -EPERM, "prog_load_fails");

	/* Some map types require particular parameters that would be
	 * sanity-checked before the -EPERM is enforced, so only validate
	 * that the simple ARRAY and HASH map types fail with -EPERM.
	 */
	for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_ARRAY; i++)
		ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL),
			  -EPERM, "map_create_fails");

	ASSERT_EQ(bpf_prog_get_fd_by_id(prog_id), -EPERM, "prog_get_fd_by_id_fails");
	ASSERT_EQ(bpf_prog_get_next_id(prog_id, &next), -EPERM, "prog_get_next_id_fails");
	ASSERT_EQ(bpf_prog_get_next_id(0, &next), -EPERM, "prog_get_next_id_fails");

	if (ASSERT_OK(bpf_map_get_info_by_fd(map_fds[0], &map_info, &map_info_len),
		      "obj_get_info_by_fd")) {
		ASSERT_EQ(bpf_map_get_fd_by_id(map_info.id), -EPERM, "map_get_fd_by_id_fails");
		ASSERT_EQ(bpf_map_get_next_id(map_info.id, &next), -EPERM,
			  "map_get_next_id_fails");
	}
	ASSERT_EQ(bpf_map_get_next_id(0, &next), -EPERM, "map_get_next_id_fails");

	if (ASSERT_OK(bpf_link_get_info_by_fd(bpf_link__fd(skel->links.sys_nanosleep_enter),
					      &link_info, &link_info_len),
		      "obj_get_info_by_fd")) {
		ASSERT_EQ(bpf_link_get_fd_by_id(link_info.id), -EPERM, "link_get_fd_by_id_fails");
		ASSERT_EQ(bpf_link_get_next_id(link_info.id, &next), -EPERM,
			  "link_get_next_id_fails");
	}
	ASSERT_EQ(bpf_link_get_next_id(0, &next), -EPERM, "link_get_next_id_fails");

	ASSERT_EQ(bpf_prog_query(prog_fd, BPF_TRACE_FENTRY, 0, &attach_flags, prog_ids,
				 &prog_cnt), -EPERM, "prog_query_fails");

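	/* Build a minimal BTF object in memory and check that loading it
	 * into the kernel is also rejected.
	 */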
	btf = btf__new_empty();
	if (ASSERT_OK_PTR(btf, "empty_btf") &&
	    ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "unpriv_int_type")) {
		const void *raw_btf_data;
		__u32 raw_btf_size;

		raw_btf_data = btf__raw_data(btf, &raw_btf_size);
		if (ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good"))
			ASSERT_EQ(bpf_btf_load(raw_btf_data, raw_btf_size, NULL), -EPERM,
				  "bpf_btf_load_fails");
	}
	btf__free(btf);
}

void test_unpriv_bpf_disabled(void)
{
	char *map_paths[NUM_MAPS] = {	PINPATH	"array",
					PINPATH "percpu_array",
					PINPATH "hash",
					PINPATH "percpu_hash",
					PINPATH "perfbuf",
					PINPATH "ringbuf",
					PINPATH "prog_array" };
	int map_fds[NUM_MAPS];
	struct test_unpriv_bpf_disabled *skel;
	char unprivileged_bpf_disabled_orig[32] = {};
	char perf_event_paranoid_orig[32] = {};
	struct bpf_prog_info prog_info = {};
	__u32 prog_info_len = sizeof(prog_info);
	struct perf_event_attr attr = {};
	int prog_fd, perf_fd = -1, i, ret;
	__u64 save_caps = 0;
	__u32 prog_id;

	skel = test_unpriv_bpf_disabled__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

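	/* test_pid tells the BPF side which process to trace; the skeleton's
	 * prog is expected to filter on it so other tasks' nanosleep calls
	 * do not pollute the perf/ring buffer output.
	 */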
	skel->bss->test_pid = getpid();

	map_fds[0] = bpf_map__fd(skel->maps.array);
	map_fds[1] = bpf_map__fd(skel->maps.percpu_array);
	map_fds[2] = bpf_map__fd(skel->maps.hash);
	map_fds[3] = bpf_map__fd(skel->maps.percpu_hash);
	map_fds[4] = bpf_map__fd(skel->maps.perfbuf);
	map_fds[5] = bpf_map__fd(skel->maps.ringbuf);
	map_fds[6] = bpf_map__fd(skel->maps.prog_array);

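	/* Pin all maps so the positive subtest can re-open them with
	 * bpf_obj_get() after capabilities are dropped.
	 */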
	for (i = 0; i < NUM_MAPS; i++)
		ASSERT_OK(bpf_obj_pin(map_fds[i], map_paths[i]), "pin map_fd");

	/* allow user without caps to use perf events */
	if (!ASSERT_OK(sysctl_set("/proc/sys/kernel/perf_event_paranoid", perf_event_paranoid_orig,
				  "-1"),
		       "set_perf_event_paranoid"))
		goto cleanup;
	/* ensure unprivileged bpf disabled is set */
	ret = sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled",
			 unprivileged_bpf_disabled_orig, "2");
	if (ret == -EPERM) {
		/* If unprivileged_bpf_disabled was already set to 1, the
		 * sysctl is locked against further writes and we get -EPERM
		 * back; that's okay, since unprivileged BPF stays disabled.
		 */
		if (!ASSERT_OK(strcmp(unprivileged_bpf_disabled_orig, "1"),
			       "unprivileged_bpf_disabled_on"))
			goto cleanup;
	} else {
		if (!ASSERT_OK(ret, "set unprivileged_bpf_disabled"))
			goto cleanup;
	}

	prog_fd = bpf_program__fd(skel->progs.sys_nanosleep_enter);
	ASSERT_OK(bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len),
		  "obj_get_info_by_fd");
	prog_id = prog_info.id;
	ASSERT_GT(prog_id, 0, "valid_prog_id");

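	/* Open a software CPU-clock perf event for the positive subtest to
	 * create a BPF link against.
	 */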
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.freq = 1;
	attr.sample_freq = 1000;
	perf_fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
	if (!ASSERT_GE(perf_fd, 0, "perf_fd"))
		goto cleanup;

	if (!ASSERT_OK(test_unpriv_bpf_disabled__attach(skel), "skel_attach"))
		goto cleanup;

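	/* Drop all effective capabilities so the subtests below run as an
	 * unprivileged user would.
	 */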
	if (!ASSERT_OK(cap_disable_effective(ALL_CAPS, &save_caps), "disable caps"))
		goto cleanup;

	if (test__start_subtest("unpriv_bpf_disabled_positive"))
		test_unpriv_bpf_disabled_positive(skel, prog_id, prog_fd, perf_fd, map_paths,
						  map_fds);

	if (test__start_subtest("unpriv_bpf_disabled_negative"))
		test_unpriv_bpf_disabled_negative(skel, prog_id, prog_fd, perf_fd, map_paths,
						  map_fds);

cleanup:
	if (perf_fd >= 0)
		close(perf_fd);
	if (save_caps)
		cap_enable_effective(save_caps, NULL);
	if (strlen(perf_event_paranoid_orig) > 0)
		sysctl_set("/proc/sys/kernel/perf_event_paranoid", NULL, perf_event_paranoid_orig);
	if (strlen(unprivileged_bpf_disabled_orig) > 0)
		sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled", NULL,
			   unprivileged_bpf_disabled_orig);
	for (i = 0; i < NUM_MAPS; i++)
		unlink(map_paths[i]);
	test_unpriv_bpf_disabled__destroy(skel);
}