xref: /linux/tools/testing/selftests/bpf/prog_tests/bpf_gotox.c (revision 6f7e6393d1ce636bb7ec77a7fe7b77458fddf701)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <test_progs.h>
4 
5 #include <linux/if_ether.h>
6 #include <linux/in.h>
7 #include <linux/ip.h>
8 #include <linux/ipv6.h>
9 #include <linux/in6.h>
10 #include <linux/udp.h>
11 #include <linux/tcp.h>
12 
13 #include <sys/syscall.h>
14 #include <bpf/bpf.h>
15 
16 #include "bpf_gotox.skel.h"
17 
18 static void __test_run(struct bpf_program *prog, void *ctx_in, size_t ctx_size_in)
19 {
20 	LIBBPF_OPTS(bpf_test_run_opts, topts,
21 			    .ctx_in = ctx_in,
22 			    .ctx_size_in = ctx_size_in,
23 		   );
24 	int err, prog_fd;
25 
26 	prog_fd = bpf_program__fd(prog);
27 	err = bpf_prog_test_run_opts(prog_fd, &topts);
28 	ASSERT_OK(err, "test_run_opts err");
29 }
30 
31 static void __subtest(struct bpf_gotox *skel, void (*check)(struct bpf_gotox *))
32 {
33 	if (skel->data->skip)
34 		test__skip();
35 	else
36 		check(skel);
37 }
38 
39 static void check_simple(struct bpf_gotox *skel,
40 			 struct bpf_program *prog,
41 			 __u64 ctx_in,
42 			 __u64 expected)
43 {
44 	skel->bss->ret_user = 0;
45 
46 	__test_run(prog, &ctx_in, sizeof(ctx_in));
47 
48 	if (!ASSERT_EQ(skel->bss->ret_user, expected, "skel->bss->ret_user"))
49 		return;
50 }
51 
52 static void check_simple_fentry(struct bpf_gotox *skel,
53 				struct bpf_program *prog,
54 				__u64 ctx_in,
55 				__u64 expected)
56 {
57 	skel->bss->in_user = ctx_in;
58 	skel->bss->ret_user = 0;
59 
60 	/* trigger */
61 	usleep(1);
62 
63 	if (!ASSERT_EQ(skel->bss->ret_user, expected, "skel->bss->ret_user"))
64 		return;
65 }
66 
/* validate that for two loads of the same jump table libbpf generates only one map */
static void check_one_map_two_jumps(struct bpf_gotox *skel)
{
	struct bpf_prog_info prog_info;
	struct bpf_map_info map_info;
	__u32 len;
	__u32 map_ids[16];
	int prog_fd, map_fd;
	int ret;
	int i;
	bool seen = false;	/* set once an INSN_ARRAY map has been found */

	/* Ask the kernel to fill map_ids[] with the ids of all maps the
	 * program uses; map_ids is passed as a user pointer in the info
	 * struct, hence the cast. */
	memset(&prog_info, 0, sizeof(prog_info));
	prog_info.map_ids = (long)map_ids;
	prog_info.nr_map_ids = ARRAY_SIZE(map_ids);
	prog_fd = bpf_program__fd(skel->progs.one_map_two_jumps);
	if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd(one_map_two_jumps)"))
		return;

	len = sizeof(prog_info);
	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &len);
	if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd(prog_fd)"))
		return;

	/* Walk every map the program references and count how many of
	 * them are instruction arrays (i.e. jump tables). */
	for (i = 0; i < prog_info.nr_map_ids; i++) {
		map_fd  = bpf_map_get_fd_by_id(map_ids[i]);
		if (!ASSERT_GE(map_fd, 0, "bpf_map_get_fd_by_id"))
			return;

		len = sizeof(map_info);
		memset(&map_info, 0, len);
		ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &len);
		if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd(map_fd)")) {
			close(map_fd);
			return;
		}

		if (map_info.type == BPF_MAP_TYPE_INSN_ARRAY) {
			/* A second INSN_ARRAY map would mean libbpf
			 * duplicated the jump table. */
			if (!ASSERT_EQ(seen, false, "more than one INSN_ARRAY map")) {
				close(map_fd);
				return;
			}
			seen = true;
		}
		close(map_fd);
	}

	/* Exactly one jump-table map must exist. */
	ASSERT_EQ(seen, true, "no INSN_ARRAY map");
}
116 
117 static void check_one_switch(struct bpf_gotox *skel)
118 {
119 	__u64 in[]   = {0, 1, 2, 3, 4,  5, 77};
120 	__u64 out[]  = {2, 3, 4, 5, 7, 19, 19};
121 	int i;
122 
123 	for (i = 0; i < ARRAY_SIZE(in); i++)
124 		check_simple(skel, skel->progs.one_switch, in[i], out[i]);
125 }
126 
127 static void check_one_switch_non_zero_sec_off(struct bpf_gotox *skel)
128 {
129 	__u64 in[]   = {0, 1, 2, 3, 4,  5, 77};
130 	__u64 out[]  = {2, 3, 4, 5, 7, 19, 19};
131 	int i;
132 
133 	for (i = 0; i < ARRAY_SIZE(in); i++)
134 		check_simple(skel, skel->progs.one_switch_non_zero_sec_off, in[i], out[i]);
135 }
136 
137 static void check_two_switches(struct bpf_gotox *skel)
138 {
139 	__u64 in[]   = {0, 1, 2, 3, 4,  5, 77};
140 	__u64 out[] = {103, 104, 107, 205, 115, 1019, 1019};
141 	int i;
142 
143 	for (i = 0; i < ARRAY_SIZE(in); i++)
144 		check_simple(skel, skel->progs.two_switches, in[i], out[i]);
145 }
146 
147 static void check_big_jump_table(struct bpf_gotox *skel)
148 {
149 	__u64 in[]  = {0, 11, 27, 31, 22, 45, 99};
150 	__u64 out[] = {2,  3,  4,  5, 19, 19, 19};
151 	int i;
152 
153 	for (i = 0; i < ARRAY_SIZE(in); i++)
154 		check_simple(skel, skel->progs.big_jump_table, in[i], out[i]);
155 }
156 
157 static void check_one_jump_two_maps(struct bpf_gotox *skel)
158 {
159 	__u64 in[]  = {0, 1, 2, 3, 4,  5, 77};
160 	__u64 out[] = {12, 15, 7 , 15, 12, 15, 15};
161 	int i;
162 
163 	for (i = 0; i < ARRAY_SIZE(in); i++)
164 		check_simple(skel, skel->progs.one_jump_two_maps, in[i], out[i]);
165 }
166 
167 static void check_static_global(struct bpf_gotox *skel)
168 {
169 	__u64 in[]   = {0, 1, 2, 3, 4,  5, 77};
170 	__u64 out[]  = {2, 3, 4, 5, 7, 19, 19};
171 	int i;
172 
173 	for (i = 0; i < ARRAY_SIZE(in); i++)
174 		check_simple(skel, skel->progs.use_static_global1, in[i], out[i]);
175 	for (i = 0; i < ARRAY_SIZE(in); i++)
176 		check_simple(skel, skel->progs.use_static_global2, in[i], out[i]);
177 }
178 
179 static void check_nonstatic_global(struct bpf_gotox *skel)
180 {
181 	__u64 in[]   = {0, 1, 2, 3, 4,  5, 77};
182 	__u64 out[]  = {2, 3, 4, 5, 7, 19, 19};
183 	int i;
184 
185 	for (i = 0; i < ARRAY_SIZE(in); i++)
186 		check_simple(skel, skel->progs.use_nonstatic_global1, in[i], out[i]);
187 
188 	for (i = 0; i < ARRAY_SIZE(in); i++)
189 		check_simple(skel, skel->progs.use_nonstatic_global2, in[i], out[i]);
190 }
191 
192 static void check_other_sec(struct bpf_gotox *skel)
193 {
194 	struct bpf_link *link;
195 	__u64 in[]   = {0, 1, 2, 3, 4,  5, 77};
196 	__u64 out[]  = {2, 3, 4, 5, 7, 19, 19};
197 	int i;
198 
199 	link = bpf_program__attach(skel->progs.simple_test_other_sec);
200 	if (!ASSERT_OK_PTR(link, "link"))
201 		return;
202 
203 	for (i = 0; i < ARRAY_SIZE(in); i++)
204 		check_simple_fentry(skel, skel->progs.simple_test_other_sec, in[i], out[i]);
205 
206 	bpf_link__destroy(link);
207 }
208 
209 static void check_static_global_other_sec(struct bpf_gotox *skel)
210 {
211 	struct bpf_link *link;
212 	__u64 in[]   = {0, 1, 2, 3, 4,  5, 77};
213 	__u64 out[]  = {2, 3, 4, 5, 7, 19, 19};
214 	int i;
215 
216 	link = bpf_program__attach(skel->progs.use_static_global_other_sec);
217 	if (!ASSERT_OK_PTR(link, "link"))
218 		return;
219 
220 	for (i = 0; i < ARRAY_SIZE(in); i++)
221 		check_simple_fentry(skel, skel->progs.use_static_global_other_sec, in[i], out[i]);
222 
223 	bpf_link__destroy(link);
224 }
225 
226 static void check_nonstatic_global_other_sec(struct bpf_gotox *skel)
227 {
228 	struct bpf_link *link;
229 	__u64 in[]   = {0, 1, 2, 3, 4,  5, 77};
230 	__u64 out[]  = {2, 3, 4, 5, 7, 19, 19};
231 	int i;
232 
233 	link = bpf_program__attach(skel->progs.use_nonstatic_global_other_sec);
234 	if (!ASSERT_OK_PTR(link, "link"))
235 		return;
236 
237 	for (i = 0; i < ARRAY_SIZE(in); i++)
238 		check_simple_fentry(skel, skel->progs.use_nonstatic_global_other_sec, in[i], out[i]);
239 
240 	bpf_link__destroy(link);
241 }
242 
/*
 * The following subtests do not use the skeleton other than to check
 * whether the test should be skipped.
 */
247 
248 static int create_jt_map(__u32 max_entries)
249 {
250 	const char *map_name = "jt";
251 	__u32 key_size = 4;
252 	__u32 value_size = sizeof(struct bpf_insn_array_value);
253 
254 	return bpf_map_create(BPF_MAP_TYPE_INSN_ARRAY, map_name,
255 			      key_size, value_size, max_entries, NULL);
256 }
257 
258 static int prog_load(struct bpf_insn *insns, __u32 insn_cnt)
259 {
260 	return bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
261 }
262 
/*
 * Build and load a minimal program that does "r1 = &map_value + off" via a
 * ldimm64 instruction. Returns the prog fd (>= 0) on successful load or a
 * negative error from the verifier.
 */
static int __check_ldimm64_off_prog_load(__u32 max_entries, __u32 off)
{
	struct bpf_insn insns[] = {
		BPF_LD_IMM64_RAW(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int map_fd, ret;

	map_fd = create_jt_map(max_entries);
	if (!ASSERT_GE(map_fd, 0, "create_jt_map"))
		return -1;
	/* INSN_ARRAY maps must be frozen before a program can use them */
	if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze")) {
		close(map_fd);
		return -1;
	}

	/* ldimm64 occupies two insn slots: the fd goes into the imm of the
	 * first slot, the value offset into the imm of the second. */
	insns[0].imm = map_fd;
	insns[1].imm = off;

	ret = prog_load(insns, ARRAY_SIZE(insns));
	close(map_fd);	/* the loaded program holds its own reference */
	return ret;
}
287 
/*
 * Check that loads from an instruction array map are only allowed with offsets
 * which are multiples of 8 and do not point outside of the map.
 */
292 static void check_ldimm64_off_load(struct bpf_gotox *skel __always_unused)
293 {
294 	const __u32 max_entries = 10;
295 	int prog_fd;
296 	__u32 off;
297 
298 	for (off = 0; off < max_entries; off++) {
299 		prog_fd = __check_ldimm64_off_prog_load(max_entries, off * 8);
300 		if (!ASSERT_GE(prog_fd, 0, "__check_ldimm64_off_prog_load"))
301 			return;
302 		close(prog_fd);
303 	}
304 
305 	prog_fd = __check_ldimm64_off_prog_load(max_entries, 7 /* not a multiple of 8 */);
306 	if (!ASSERT_EQ(prog_fd, -EACCES, "__check_ldimm64_off_prog_load: should be -EACCES")) {
307 		close(prog_fd);
308 		return;
309 	}
310 
311 	prog_fd = __check_ldimm64_off_prog_load(max_entries, max_entries * 8 /* too large */);
312 	if (!ASSERT_EQ(prog_fd, -EACCES, "__check_ldimm64_off_prog_load: should be -EACCES")) {
313 		close(prog_fd);
314 		return;
315 	}
316 }
317 
/*
 * Populate a 6-entry jump table, patch @insns so that the program computes
 * "&map_value + off1 + off2" before a gotox, and try to load it.
 * The caller-provided @insns template must have the ldimm64 pair at
 * indices 0/1 and the "r1 += imm" instruction at index 2.
 * Returns the prog fd on success or a negative error.
 */
static int __check_ldimm64_gotox_prog_load(struct bpf_insn *insns,
					   __u32 insn_cnt,
					   __u32 off1, __u32 off2)
{
	/* original (pre-verification) instruction offsets of the jump targets */
	const __u32 values[] = {5, 7, 9, 11, 13, 15};
	const __u32 max_entries = ARRAY_SIZE(values);
	struct bpf_insn_array_value val = {};
	int map_fd, ret, i;

	map_fd = create_jt_map(max_entries);
	if (!ASSERT_GE(map_fd, 0, "create_jt_map"))
		return -1;

	for (i = 0; i < max_entries; i++) {
		val.orig_off = values[i];
		if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &i, &val, 0), 0,
			       "bpf_map_update_elem")) {
			close(map_fd);
			return -1;
		}
	}

	/* the map must be frozen before a program may jump through it */
	if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze")) {
		close(map_fd);
		return -1;
	}

	/* r1 = &map + offset1 */
	insns[0].imm = map_fd;
	insns[1].imm = off1;

	/* r1 += off2 */
	insns[2].imm = off2;

	ret = prog_load(insns, insn_cnt);
	close(map_fd);	/* a successfully loaded program keeps its own ref */
	return ret;
}
356 
357 static void reject_offsets(struct bpf_insn *insns, __u32 insn_cnt, __u32 off1, __u32 off2)
358 {
359 	int prog_fd;
360 
361 	prog_fd = __check_ldimm64_gotox_prog_load(insns, insn_cnt, off1, off2);
362 	if (!ASSERT_EQ(prog_fd, -EACCES, "__check_ldimm64_gotox_prog_load"))
363 		close(prog_fd);
364 }
365 
366 /*
367  * Verify a bit more complex programs which include indirect jumps
368  * and with jump tables loaded with a non-zero offset
369  */
static void check_ldimm64_off_gotox(struct bpf_gotox *skel __always_unused)
{
	struct bpf_insn insns[] = {
		/*
		 * The following instructions perform an indirect jump to
		 * labels below. Thus valid offsets in the map are {0,...,5}.
		 * The program rewrites the offsets in the instructions below:
		 *     r1 = &map + offset1
		 *     r1 += offset2
		 *     r1 = *r1
		 *     gotox r1
		 */
		BPF_LD_IMM64_RAW(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_JA | BPF_X, BPF_REG_1, 0, 0, 0),

		/* case 0: */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		/* case 1: */
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		/* case 2: */
		BPF_MOV64_IMM(BPF_REG_0, 2),
		BPF_EXIT_INSN(),
		/* case 3: */
		BPF_MOV64_IMM(BPF_REG_0, 3),
		BPF_EXIT_INSN(),
		/* case 4: */
		BPF_MOV64_IMM(BPF_REG_0, 4),
		BPF_EXIT_INSN(),
		/* default: */
		BPF_MOV64_IMM(BPF_REG_0, 5),
		BPF_EXIT_INSN(),
	};
	int prog_fd, err;
	__u32 off1, off2;

	/* allow all combinations off1 + off2 < 6 */
	for (off1 = 0; off1 < 6; off1++) {
		for (off2 = 0; off1 + off2 < 6; off2++) {
			LIBBPF_OPTS(bpf_test_run_opts, topts);

			prog_fd = __check_ldimm64_gotox_prog_load(insns, ARRAY_SIZE(insns),
								  off1 * 8, off2 * 8);
			if (!ASSERT_GE(prog_fd, 0, "__check_ldimm64_gotox_prog_load"))
				return;

			err = bpf_prog_test_run_opts(prog_fd, &topts);
			if (!ASSERT_OK(err, "test_run_opts err")) {
				close(prog_fd);
				return;
			}

			/* jump table entry N returns N, so the program's
			 * return value encodes which label was reached */
			if (!ASSERT_EQ(topts.retval, off1 + off2, "test_run_opts retval")) {
				close(prog_fd);
				return;
			}

			close(prog_fd);
		}
	}

	/* reject off1 + off2 >= 6 */
	reject_offsets(insns, ARRAY_SIZE(insns), 8 * 3, 8 * 3);
	reject_offsets(insns, ARRAY_SIZE(insns), 8 * 7, 8 * 0);
	reject_offsets(insns, ARRAY_SIZE(insns), 8 * 0, 8 * 7);

	/* reject (off1 + off2) % 8 != 0 */
	reject_offsets(insns, ARRAY_SIZE(insns), 3, 3);
	reject_offsets(insns, ARRAY_SIZE(insns), 7, 0);
	reject_offsets(insns, ARRAY_SIZE(insns), 0, 7);
}
444 
445 void test_bpf_gotox(void)
446 {
447 	struct bpf_gotox *skel;
448 	int ret;
449 
450 	skel = bpf_gotox__open();
451 	if (!ASSERT_NEQ(skel, NULL, "bpf_gotox__open"))
452 		return;
453 
454 	ret = bpf_gotox__load(skel);
455 	if (!ASSERT_OK(ret, "bpf_gotox__load"))
456 		return;
457 
458 	skel->bss->pid = getpid();
459 
460 	if (test__start_subtest("one-switch"))
461 		__subtest(skel, check_one_switch);
462 
463 	if (test__start_subtest("one-switch-non-zero-sec-offset"))
464 		__subtest(skel, check_one_switch_non_zero_sec_off);
465 
466 	if (test__start_subtest("two-switches"))
467 		__subtest(skel, check_two_switches);
468 
469 	if (test__start_subtest("big-jump-table"))
470 		__subtest(skel, check_big_jump_table);
471 
472 	if (test__start_subtest("static-global"))
473 		__subtest(skel, check_static_global);
474 
475 	if (test__start_subtest("nonstatic-global"))
476 		__subtest(skel, check_nonstatic_global);
477 
478 	if (test__start_subtest("other-sec"))
479 		__subtest(skel, check_other_sec);
480 
481 	if (test__start_subtest("static-global-other-sec"))
482 		__subtest(skel, check_static_global_other_sec);
483 
484 	if (test__start_subtest("nonstatic-global-other-sec"))
485 		__subtest(skel, check_nonstatic_global_other_sec);
486 
487 	if (test__start_subtest("one-jump-two-maps"))
488 		__subtest(skel, check_one_jump_two_maps);
489 
490 	if (test__start_subtest("one-map-two-jumps"))
491 		__subtest(skel, check_one_map_two_jumps);
492 
493 	if (test__start_subtest("check-ldimm64-off"))
494 		__subtest(skel, check_ldimm64_off_load);
495 
496 	if (test__start_subtest("check-ldimm64-off-gotox"))
497 		__subtest(skel, check_ldimm64_off_gotox);
498 
499 	bpf_gotox__destroy(skel);
500 }
501