xref: /linux/tools/testing/selftests/bpf/prog_tests/fd_array.c (revision d0d106a2bd21499901299160744e5fe9f4c83ddb)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <test_progs.h>
4 
5 #include <linux/btf.h>
6 #include <bpf/bpf.h>
7 
8 #include "../test_btf.h"
9 
new_map(void)10 static inline int new_map(void)
11 {
12 	const char *name = NULL;
13 	__u32 max_entries = 1;
14 	__u32 value_size = 8;
15 	__u32 key_size = 4;
16 
17 	return bpf_map_create(BPF_MAP_TYPE_ARRAY, name,
18 			      key_size, value_size,
19 			      max_entries, NULL);
20 }
21 
new_btf(void)22 static int new_btf(void)
23 {
24 	struct btf_blob {
25 		struct btf_header btf_hdr;
26 		__u32 types[8];
27 		__u32 str;
28 	} raw_btf = {
29 		.btf_hdr = {
30 			.magic = BTF_MAGIC,
31 			.version = BTF_VERSION,
32 			.hdr_len = sizeof(struct btf_header),
33 			.type_len = sizeof(raw_btf.types),
34 			.str_off = offsetof(struct btf_blob, str) - offsetof(struct btf_blob, types),
35 			.str_len = sizeof(raw_btf.str),
36 		},
37 		.types = {
38 			/* long */
39 			BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8),  /* [1] */
40 			/* unsigned long */
41 			BTF_TYPE_INT_ENC(0, 0, 0, 64, 8),  /* [2] */
42 		},
43 	};
44 
45 	return bpf_btf_load(&raw_btf, sizeof(raw_btf), NULL);
46 }
47 
/*
 * Close a file descriptor and poison it with -1 so that a repeated
 * Close() of the same variable is a no-op.  The argument is evaluated
 * exactly once (via its address), so expressions with side effects,
 * e.g. Close(fds[--i]), behave correctly.  FD must be an int lvalue.
 */
#define Close(FD) do {			\
	int *_fd_ptr = &(FD);		\
	if (*_fd_ptr >= 0) {		\
		close(*_fd_ptr);	\
		*_fd_ptr = -1;		\
	}				\
} while (0)
54 
map_exists(__u32 id)55 static bool map_exists(__u32 id)
56 {
57 	int fd;
58 
59 	fd = bpf_map_get_fd_by_id(id);
60 	if (fd >= 0) {
61 		close(fd);
62 		return true;
63 	}
64 	return false;
65 }
66 
btf_exists(__u32 id)67 static bool btf_exists(__u32 id)
68 {
69 	int fd;
70 
71 	fd = bpf_btf_get_fd_by_id(id);
72 	if (fd >= 0) {
73 		close(fd);
74 		return true;
75 	}
76 	return false;
77 }
78 
/*
 * Fetch the ids of all maps bound to @prog_fd.
 *
 * On entry *nr_map_ids holds the capacity of the @map_ids array; on
 * successful return it is updated to the number of map ids reported by
 * the kernel.  Returns 0 on success, -1 on failure (an ASSERT fires).
 */
static inline int bpf_prog_get_map_ids(int prog_fd, __u32 *nr_map_ids, __u32 *map_ids)
{
	__u32 len = sizeof(struct bpf_prog_info);
	struct bpf_prog_info info;
	int err;

	memset(&info, 0, len);
	/* Proper statements; these were accidentally comma-chained before. */
	info.nr_map_ids = *nr_map_ids;
	info.map_ids = ptr_to_u64(map_ids);

	err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
	if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
		return -1;

	*nr_map_ids = info.nr_map_ids;

	return 0;
}
97 
__load_test_prog(int map_fd,const int * fd_array,int fd_array_cnt)98 static int __load_test_prog(int map_fd, const int *fd_array, int fd_array_cnt)
99 {
100 	/* A trivial program which uses one map */
101 	struct bpf_insn insns[] = {
102 		BPF_LD_MAP_FD(BPF_REG_1, map_fd),
103 		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
104 		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
105 		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
106 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
107 		BPF_MOV64_IMM(BPF_REG_0, 0),
108 		BPF_EXIT_INSN(),
109 	};
110 	LIBBPF_OPTS(bpf_prog_load_opts, opts);
111 
112 	opts.fd_array = fd_array;
113 	opts.fd_array_cnt = fd_array_cnt;
114 
115 	return bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, ARRAY_SIZE(insns), &opts);
116 }
117 
/* Like __load_test_prog(), but creates a throwaway map for the program to
 * reference and closes it again after the load attempt. */
static int load_test_prog(const int *fd_array, int fd_array_cnt)
{
	int ret, map_fd;

	map_fd = new_map();
	if (!ASSERT_GE(map_fd, 0, "new_map"))
		return map_fd;

	ret = __load_test_prog(map_fd, fd_array, fd_array_cnt);
	close(map_fd);

	return ret;
}
131 
check_expected_map_ids(int prog_fd,int expected,__u32 * map_ids,__u32 * nr_map_ids)132 static bool check_expected_map_ids(int prog_fd, int expected, __u32 *map_ids, __u32 *nr_map_ids)
133 {
134 	int err;
135 
136 	err = bpf_prog_get_map_ids(prog_fd, nr_map_ids, map_ids);
137 	if (!ASSERT_OK(err, "bpf_prog_get_map_ids"))
138 		return false;
139 	if (!ASSERT_EQ(*nr_map_ids, expected, "unexpected nr_map_ids"))
140 		return false;
141 
142 	return true;
143 }
144 
145 /*
146  * Load a program, which uses one map. No fd_array maps are present.
147  * On return only one map is expected to be bound to prog.
148  */
check_fd_array_cnt__no_fd_array(void)149 static void check_fd_array_cnt__no_fd_array(void)
150 {
151 	__u32 map_ids[16];
152 	__u32 nr_map_ids;
153 	int prog_fd = -1;
154 
155 	prog_fd = load_test_prog(NULL, 0);
156 	if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
157 		return;
158 	nr_map_ids = ARRAY_SIZE(map_ids);
159 	check_expected_map_ids(prog_fd, 1, map_ids, &nr_map_ids);
160 	close(prog_fd);
161 }
162 
/*
 * Load a program, which uses one map, and pass two extra, non-equal, maps in
 * fd_array with fd_array_cnt=2. On return three maps are expected to be bound
 * to the program.
 */
static void check_fd_array_cnt__fd_array_ok(void)
{
	int extra_fds[2] = { -1, -1 };
	__u32 map_ids[16];
	__u32 nr_map_ids;
	int prog_fd = -1;

	extra_fds[0] = new_map();
	if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
		goto cleanup;
	extra_fds[1] = new_map();
	if (!ASSERT_GE(extra_fds[1], 0, "new_map"))
		goto cleanup;
	prog_fd = load_test_prog(extra_fds, 2);
	if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
		goto cleanup;
	nr_map_ids = ARRAY_SIZE(map_ids);
	/* expect 3 maps: the one the program uses + the two fd_array maps */
	if (!check_expected_map_ids(prog_fd, 3, map_ids, &nr_map_ids))
		goto cleanup;

	/* maps should still exist when original file descriptors are closed */
	Close(extra_fds[0]);
	Close(extra_fds[1]);
	if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map_ids[0] should exist"))
		goto cleanup;
	if (!ASSERT_EQ(map_exists(map_ids[1]), true, "map_ids[1] should exist"))
		goto cleanup;

	/* some fds might be invalid, so ignore return codes */
cleanup:
	Close(extra_fds[1]);
	Close(extra_fds[0]);
	Close(prog_fd);
}
202 
/*
 * Load a program with a few extra maps duplicated in the fd_array.
 * After the load maps should only be referenced once.
 */
static void check_fd_array_cnt__duplicated_maps(void)
{
	int extra_fds[4] = { -1, -1, -1, -1 };
	__u32 map_ids[16];
	__u32 nr_map_ids;
	int prog_fd = -1;

	/* two distinct maps, each fd appearing twice in fd_array */
	extra_fds[0] = extra_fds[2] = new_map();
	if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
		goto cleanup;
	extra_fds[1] = extra_fds[3] = new_map();
	if (!ASSERT_GE(extra_fds[1], 0, "new_map"))
		goto cleanup;
	prog_fd = load_test_prog(extra_fds, 4);
	if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
		goto cleanup;
	nr_map_ids = ARRAY_SIZE(map_ids);
	/* expect 3, not 5: duplicates must be counted only once */
	if (!check_expected_map_ids(prog_fd, 3, map_ids, &nr_map_ids))
		goto cleanup;

	/* maps should still exist when original file descriptors are closed */
	Close(extra_fds[0]);
	Close(extra_fds[1]);
	if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map should exist"))
		goto cleanup;
	if (!ASSERT_EQ(map_exists(map_ids[1]), true, "map should exist"))
		goto cleanup;

	/* some fds might be invalid, so ignore return codes */
cleanup:
	Close(extra_fds[1]);
	Close(extra_fds[0]);
	Close(prog_fd);
}
241 
/*
 * Check that if maps which are referenced by a program are
 * passed in fd_array, then they will be referenced only once
 */
static void check_fd_array_cnt__referenced_maps_in_fd_array(void)
{
	int extra_fds[1] = { -1 };
	__u32 map_ids[16];
	__u32 nr_map_ids;
	int prog_fd = -1;

	extra_fds[0] = new_map();
	if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
		goto cleanup;
	/* the same map is both used by the program and passed in fd_array */
	prog_fd = __load_test_prog(extra_fds[0], extra_fds, 1);
	if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
		goto cleanup;
	nr_map_ids = ARRAY_SIZE(map_ids);
	/* expect exactly one bound map, not two */
	if (!check_expected_map_ids(prog_fd, 1, map_ids, &nr_map_ids))
		goto cleanup;

	/* map should still exist when original file descriptor is closed */
	Close(extra_fds[0]);
	if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map should exist"))
		goto cleanup;

	/* some fds might be invalid, so ignore return codes */
cleanup:
	Close(extra_fds[0]);
	Close(prog_fd);
}
273 
/* Look up the kernel-assigned id of the BTF object behind @btf_fd.
 * @id may be NULL when the caller only wants to probe the fd.
 * Returns 0 on success, a negative error otherwise. */
static int get_btf_id_by_fd(int btf_fd, __u32 *id)
{
	__u32 info_len = sizeof(struct bpf_btf_info);
	struct bpf_btf_info info;
	int err;

	memset(&info, 0, info_len);
	err = bpf_btf_get_info_by_fd(btf_fd, &info, &info_len);
	if (err)
		return err;

	if (id)
		*id = info.id;

	return 0;
}
288 
/*
 * Check that fd_array operates properly for btfs. Namely, to check that
 * passing a btf fd in fd_array increases its reference count, do the
 * following:
 *  1) Create a new btf, it's referenced only by a file descriptor, so refcnt=1
 *  2) Load a BPF prog with fd_array[0] = btf_fd; now btf's refcnt=2
 *  3) Close the btf_fd, now refcnt=1
 * Wait and check that BTF still exists.
 */
static void check_fd_array_cnt__referenced_btfs(void)
{
	int extra_fds[1] = { -1 };
	int prog_fd = -1;
	__u32 btf_id;
	int tries;
	int err;

	extra_fds[0] = new_btf();
	if (!ASSERT_GE(extra_fds[0], 0, "new_btf"))
		goto cleanup;
	prog_fd = load_test_prog(extra_fds, 1);
	if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
		goto cleanup;

	/* btf should still exist when original file descriptor is closed */
	err = get_btf_id_by_fd(extra_fds[0], &btf_id);
	if (!ASSERT_GE(err, 0, "get_btf_id_by_fd"))
		goto cleanup;

	Close(extra_fds[0]);

	/* make sure any RCU-deferred release would have happened by now */
	if (!ASSERT_GE(kern_sync_rcu(), 0, "kern_sync_rcu 1"))
		goto cleanup;

	if (!ASSERT_EQ(btf_exists(btf_id), true, "btf should exist"))
		goto cleanup;

	/* drop the last reference: the program held the btf alive */
	Close(prog_fd);

	/* The program is freed by a workqueue, so no reliable
	 * way to sync, so just wait a bit (max ~1 second). */
	for (tries = 100; tries >= 0; tries--) {
		usleep(1000);

		if (!btf_exists(btf_id))
			break;

		if (tries)
			continue;

		/* all retries exhausted and the btf is still around */
		PRINT_FAIL("btf should have been freed");
	}

	/* some fds might be invalid, so ignore return codes */
cleanup:
	Close(extra_fds[0]);
	Close(prog_fd);
}
347 
/*
 * Test that a program with trash in fd_array can't be loaded:
 * only map and BTF file descriptors should be accepted.
 */
static void check_fd_array_cnt__fd_array_with_trash(void)
{
	int extra_fds[3] = { -1, -1, -1 };
	int prog_fd = -1;

	extra_fds[0] = new_map();
	if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
		goto cleanup;
	extra_fds[1] = new_btf();
	if (!ASSERT_GE(extra_fds[1], 0, "new_btf"))
		goto cleanup;

	/* trash 1: not a file descriptor */
	extra_fds[2] = 0xbeef;
	prog_fd = load_test_prog(extra_fds, 3);
	if (!ASSERT_EQ(prog_fd, -EBADF, "prog should have been rejected with -EBADF"))
		goto cleanup;

	/* trash 2: a valid fd, but not a map or btf */
	extra_fds[2] = socket(AF_INET, SOCK_STREAM, 0);
	if (!ASSERT_GE(extra_fds[2], 0, "socket"))
		goto cleanup;

	prog_fd = load_test_prog(extra_fds, 3);
	if (!ASSERT_EQ(prog_fd, -EINVAL, "prog should have been rejected with -EINVAL"))
		goto cleanup;

	/* Validate that the prog is ok if trash is removed */
	Close(extra_fds[2]);
	extra_fds[2] = new_btf();
	if (!ASSERT_GE(extra_fds[2], 0, "new_btf"))
		goto cleanup;

	prog_fd = load_test_prog(extra_fds, 3);
	if (!ASSERT_GE(prog_fd, 0, "prog should have been loaded"))
		goto cleanup;

	/* some fds might be invalid, so ignore return codes */
cleanup:
	Close(extra_fds[2]);
	Close(extra_fds[1]);
	Close(extra_fds[0]);
	Close(prog_fd);	/* previously leaked when the final load succeeded */
}
395 
/*
 * Test that a program with too big fd_array can't be loaded:
 * 65 maps is one more than the kernel accepts, so the load
 * must fail with -E2BIG.
 */
static void check_fd_array_cnt__fd_array_too_big(void)
{
	int extra_fds[65];
	int prog_fd = -1;
	int i;

	for (i = 0; i < 65; i++) {
		extra_fds[i] = new_map();
		if (!ASSERT_GE(extra_fds[i], 0, "new_map"))
			goto cleanup_fds;
	}

	prog_fd = load_test_prog(extra_fds, 65);
	ASSERT_EQ(prog_fd, -E2BIG, "prog should have been rejected with -E2BIG");
	/* in case the load unexpectedly succeeded, don't leak the prog fd */
	Close(prog_fd);

cleanup_fds:
	/* Close() is a macro: never hand it an argument with side effects
	 * like extra_fds[--i], which may be evaluated more than once. */
	while (i > 0) {
		i--;
		Close(extra_fds[i]);
	}
}
418 
test_fd_array_cnt(void)419 void test_fd_array_cnt(void)
420 {
421 	if (test__start_subtest("no-fd-array"))
422 		check_fd_array_cnt__no_fd_array();
423 
424 	if (test__start_subtest("fd-array-ok"))
425 		check_fd_array_cnt__fd_array_ok();
426 
427 	if (test__start_subtest("fd-array-dup-input"))
428 		check_fd_array_cnt__duplicated_maps();
429 
430 	if (test__start_subtest("fd-array-ref-maps-in-array"))
431 		check_fd_array_cnt__referenced_maps_in_fd_array();
432 
433 	if (test__start_subtest("fd-array-ref-btfs"))
434 		check_fd_array_cnt__referenced_btfs();
435 
436 	if (test__start_subtest("fd-array-trash-input"))
437 		check_fd_array_cnt__fd_array_with_trash();
438 
439 	if (test__start_subtest("fd-array-2big"))
440 		check_fd_array_cnt__fd_array_too_big();
441 }
442