// SPDX-License-Identifier: GPL-2.0
#include <unistd.h>
#include <test_progs.h>
#include <network_helpers.h>
#include "tailcall_poke.skel.h"
#include "tailcall_bpf2bpf_hierarchy2.skel.h"
#include "tailcall_bpf2bpf_hierarchy3.skel.h"
#include "tailcall_freplace.skel.h"
#include "tc_bpf2bpf.skel.h"
#include "tailcall_fail.skel.h"

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, j, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}

out:
	bpf_object__close(obj);
}

/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 2, "tailcall retval");

	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
	bpf_object__close(obj);
}

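/* test_tailcall_count is a shared helper for the counting tests below: it
 * loads the given object, wires classifier_0 into slot 0 of jmp_table,
 * optionally attaches fentry and/or fexit tracing programs to subprog_tail,
 * runs the entry program, and verifies that the tail call counter (and the
 * fentry/fexit counters, when enabled) reach 33.
 */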
static void test_tailcall_count(const char *which, bool test_fentry,
				bool test_fexit)
{
	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	if (test_fentry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	if (test_fexit) {
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	if (test_fentry) {
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 33, "fentry count");
	}

	if (test_fexit) {
		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fexit.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fexit.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 33, "fexit count");
	}

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(fexit_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(fexit_obj);
	bpf_object__close(obj);
}

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches expectations. JIT uses a direct jump.
 */
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.bpf.o", false, false);
}

/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches expectations. JIT uses an indirect jump.
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.bpf.o", false, false);
}

/* test_tailcall_4 checks that the kernel properly selects an indirect jump
 * for the case where the key is not known. The key is passed via global
 * data to select different targets whose return values we can compare.
 */
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_5 probes, similarly to test_tailcall_4, that the kernel
 * generates an indirect jump when the key is constant but differs across
 * branches.
 */
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 makes sure that tail calls work correctly in
 * combination with BPF subprograms.
 */
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* make sure that subprog can access ctx and entry prog that
	 * called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches expectations when the tailcall is preceded by a
 * bpf2bpf call.
 */
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within bpf subprograms that have tailcalls in
 * them.
 */
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf4.skel.h"

/* test_tailcall_bpf2bpf_4 checks that the tailcall counter is correctly
 * preserved across tailcalls combined with bpf2bpf calls. To make sure the
 * tailcall counter behaves correctly, the bpf program goes through the
 * following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tailcalls and start counting from subprog2,
 * where the loop begins. At the end of the test, make sure that the global
 * counter is equal to 31, because the tailcall counter includes the first
 * two tailcalls whereas the global counter is incremented only in the loop
 * presented in the flow above.
 *
 * The noise parameter is used to insert bpf_map_update calls into the logic
 * to force the verifier to patch instructions. This allows us to ensure the
 * jump logic remains correct with instruction movement.
 */
static void test_tailcall_bpf2bpf_4(bool noise)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct tailcall_bpf2bpf4__bss val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	val.noise = noise;
	val.count = 0;
	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val.count, 31, "tailcall count");

out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf6.skel.h"

/* Tail call counting works even when there is data on the stack which is
 * not aligned to 8 bytes.
 */
static void test_tailcall_bpf2bpf_6(void)
{
	struct tailcall_bpf2bpf6 *obj;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	obj = tailcall_bpf2bpf6__open_and_load();
	if (!ASSERT_OK_PTR(obj, "open and load"))
		return;

	main_fd = bpf_program__fd(obj->progs.entry);
	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
		goto out;

	map_fd = bpf_map__fd(obj->maps.jmp_table);
	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
		goto out;

	prog_fd = bpf_program__fd(obj->progs.classifier_0);
	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "jmp_table map update"))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "entry prog test run");
	ASSERT_EQ(topts.retval, 0, "tailcall retval");

	data_fd = bpf_map__fd(obj->maps.bss);
	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "bss map lookup");
	ASSERT_EQ(val, 1, "done flag is set");

out:
	tailcall_bpf2bpf6__destroy(obj);
}

/* test_tailcall_bpf2bpf_fentry checks that the count value of the tail call
 * limit enforcement matches expectations when the tailcall is preceded by a
 * bpf2bpf call, and the bpf2bpf call is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false);
}

/* test_tailcall_bpf2bpf_fexit checks that the count value of the tail call
 * limit enforcement matches expectations when the tailcall is preceded by a
 * bpf2bpf call, and the bpf2bpf call is traced by fexit.
 */
static void test_tailcall_bpf2bpf_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true);
}

/* test_tailcall_bpf2bpf_fentry_fexit checks that the count value of the tail
 * call limit enforcement matches expectations when the tailcall is preceded
 * by a bpf2bpf call, and the bpf2bpf call is traced by both fentry and
 * fexit.
 */
static void test_tailcall_bpf2bpf_fentry_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true);
}

/* test_tailcall_bpf2bpf_fentry_entry checks that the count value of the tail
 * call limit enforcement matches expectations when the tailcall is preceded
 * by a bpf2bpf call, and the bpf2bpf caller is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry_entry(void)
{
	struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL;
	int err, map_fd, prog_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_link *fentry_link = NULL;
	struct bpf_program *prog;
	char buff[128] = {};

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o",
				 BPF_PROG_TYPE_SCHED_CLS,
				 &tgt_obj, &prog_fd);
	if (!ASSERT_OK(err, "load tgt_obj"))
		return;

	prog_array = bpf_object__find_map_by_name(tgt_obj, "jmp_table");
	if (!ASSERT_OK_PTR(prog_array, "find jmp_table map"))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (!ASSERT_FALSE(map_fd < 0, "find jmp_table map fd"))
		goto out;

	prog = bpf_object__find_program_by_name(tgt_obj, "classifier_0");
	if (!ASSERT_OK_PTR(prog, "find classifier_0 prog"))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_FALSE(prog_fd < 0, "find classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
					   NULL);
	if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
		goto out;

	prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
	if (!ASSERT_OK_PTR(prog, "find fentry prog"))
		goto out;

	err = bpf_program__set_attach_target(prog, prog_fd, "classifier_0");
	if (!ASSERT_OK(err, "set_attach_target classifier_0"))
		goto out;

	err = bpf_object__load(fentry_obj);
	if (!ASSERT_OK(err, "load fentry_obj"))
		goto out;

	fentry_link = bpf_program__attach_trace(prog);
	if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
		goto out;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(tgt_obj, "tailcall.bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0, "find tailcall.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 34, "tailcall count");

	data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall_bpf2bpf_fentry.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0,
			  "find tailcall_bpf2bpf_fentry.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "fentry count");
	ASSERT_EQ(val, 1, "fentry count");

out:
	bpf_link__destroy(fentry_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(tgt_obj);
}

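/* Pin path shared by the two tailcall_poke skeleton instances below. */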
#define JMP_TABLE "/sys/fs/bpf/jmp_table"

static int poke_thread_exit;

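/* Updater thread for test_tailcall_poke: keep toggling jmp_table slot 0
 * between the call1 and call2 programs until the main thread sets
 * poke_thread_exit.
 */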
static void *poke_update(void *arg)
{
	__u32 zero = 0, prog1_fd, prog2_fd, map_fd;
	struct tailcall_poke *call = arg;

	map_fd = bpf_map__fd(call->maps.jmp_table);
	prog1_fd = bpf_program__fd(call->progs.call1);
	prog2_fd = bpf_program__fd(call->progs.call2);

	while (!poke_thread_exit) {
		bpf_map_update_elem(map_fd, &zero, &prog1_fd, BPF_ANY);
		bpf_map_update_elem(map_fd, &zero, &prog2_fd, BPF_ANY);
	}

	return NULL;
}

/*
 * We are trying to hit a prog array update during another program load
 * that shares the same prog array map.
 *
 * For that we share the jmp_table map between two skeleton instances by
 * pinning the jmp_table to the same path. Then the first skeleton instance
 * periodically updates jmp_table in the poke_update thread while we load
 * the second skeleton instance in the main thread.
 */
static void test_tailcall_poke(void)
{
	struct tailcall_poke *call, *test;
	int err, cnt = 10;
	pthread_t thread;

	unlink(JMP_TABLE);

	call = tailcall_poke__open_and_load();
	if (!ASSERT_OK_PTR(call, "tailcall_poke__open"))
		return;

	err = bpf_map__pin(call->maps.jmp_table, JMP_TABLE);
	if (!ASSERT_OK(err, "bpf_map__pin"))
		goto out;

	err = pthread_create(&thread, NULL, poke_update, call);
	if (!ASSERT_OK(err, "new toggler"))
		goto out;

	while (cnt--) {
		test = tailcall_poke__open();
		if (!ASSERT_OK_PTR(test, "tailcall_poke__open"))
			break;

		err = bpf_map__set_pin_path(test->maps.jmp_table, JMP_TABLE);
		if (!ASSERT_OK(err, "bpf_map__pin")) {
			tailcall_poke__destroy(test);
			break;
		}

		bpf_program__set_autoload(test->progs.test, true);
		bpf_program__set_autoload(test->progs.call1, false);
		bpf_program__set_autoload(test->progs.call2, false);

		err = tailcall_poke__load(test);
		tailcall_poke__destroy(test);
		if (!ASSERT_OK(err, "tailcall_poke__load"))
			break;
	}

	poke_thread_exit = 1;
	ASSERT_OK(pthread_join(thread, NULL), "pthread_join");

out:
	bpf_map__unpin(call->maps.jmp_table, JMP_TABLE);
	tailcall_poke__destroy(call);
}

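/* test_tailcall_hierarchy_count is a shared helper for the hierarchy tests
 * below: it loads the given object, wires the entry prog (or, with
 * test_fentry_entry, an fentry prog attached to it) into slot 0 of the
 * corresponding jmp_table, optionally traces subprog_tail with fentry/fexit,
 * and verifies the tail call and tracing counters before and after the
 * jmp_table slot is deleted.
 */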
static void test_tailcall_hierarchy_count(const char *which, bool test_fentry,
					  bool test_fexit,
					  bool test_fentry_entry)
{
	int err, map_fd, prog_fd, main_data_fd, fentry_data_fd, fexit_data_fd, i, val;
	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
	struct bpf_program *prog, *fentry_prog;
	struct bpf_map *prog_array, *data_map;
	int fentry_prog_fd;
	char buff[128] = {};

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (!ASSERT_OK(err, "load obj"))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (!ASSERT_OK_PTR(prog, "find entry prog"))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
		goto out;

	if (test_fentry_entry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_hierarchy_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		fentry_prog = bpf_object__find_program_by_name(fentry_obj,
							       "fentry");
		if (!ASSERT_OK_PTR(fentry_prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(fentry_prog, prog_fd,
						     "entry");
		if (!ASSERT_OK(err, "set_attach_target entry"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(fentry_prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;

		fentry_prog_fd = bpf_program__fd(fentry_prog);
		if (!ASSERT_GE(fentry_prog_fd, 0, "fentry_prog_fd"))
			goto out;

		prog_array = bpf_object__find_map_by_name(fentry_obj, "jmp_table");
		if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
			goto out;

		map_fd = bpf_map__fd(prog_array);
		if (!ASSERT_GE(map_fd, 0, "map_fd"))
			goto out;

		i = 0;
		err = bpf_map_update_elem(map_fd, &i, &fentry_prog_fd, BPF_ANY);
		if (!ASSERT_OK(err, "update jmp_table"))
			goto out;

		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find data_map"))
			goto out;

	} else {
		prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
		if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
			goto out;

		map_fd = bpf_map__fd(prog_array);
		if (!ASSERT_GE(map_fd, 0, "map_fd"))
			goto out;

		i = 0;
		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (!ASSERT_OK(err, "update jmp_table"))
			goto out;

		data_map = bpf_object__find_map_by_name(obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find data_map"))
			goto out;
	}

	if (test_fentry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	if (test_fexit) {
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	main_data_fd = bpf_map__fd(data_map);
	if (!ASSERT_GE(main_data_fd, 0, "main_data_fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(main_data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 34, "tailcall count");

	if (test_fentry) {
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		fentry_data_fd = bpf_map__fd(data_map);
		if (!ASSERT_GE(fentry_data_fd, 0,
				  "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 68, "fentry count");
	}

	if (test_fexit) {
		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fexit.bss map"))
			goto out;

		fexit_data_fd = bpf_map__fd(data_map);
		if (!ASSERT_GE(fexit_data_fd, 0,
				  "find tailcall_bpf2bpf_fexit.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(fexit_data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 68, "fexit count");
	}

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
		goto out;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(main_data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 35, "tailcall count");

	if (test_fentry) {
		i = 0;
		err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 70, "fentry count");
	}

	if (test_fexit) {
		i = 0;
		err = bpf_map_lookup_elem(fexit_data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 70, "fexit count");
	}

out:
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(fexit_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(fexit_obj);
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_hierarchy_1 checks that the count value of the tail
 * call limit enforcement matches expectations when tailcalls are preceded
 * by two bpf2bpf calls.
 *
 *         subprog --tailcall-> entry
 * entry <
 *         subprog --tailcall-> entry
 */
static void test_tailcall_bpf2bpf_hierarchy_1(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      false, false, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fentry checks that the count value of the
 * tail call limit enforcement matches expectations when tailcalls are
 * preceded by two bpf2bpf calls, and the two subprogs are traced by fentry.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      true, false, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fexit checks that the count value of the
 * tail call limit enforcement matches expectations when tailcalls are
 * preceded by two bpf2bpf calls, and the two subprogs are traced by fexit.
 */
static void test_tailcall_bpf2bpf_hierarchy_fexit(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      false, true, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fentry_fexit checks that the count value of
 * the tail call limit enforcement matches expectations when tailcalls are
 * preceded by two bpf2bpf calls, and the two subprogs are traced by both
 * fentry and fexit.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry_fexit(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      true, true, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fentry_entry checks that the count value of
 * the tail call limit enforcement matches expectations when tailcalls are
 * preceded by two bpf2bpf calls in fentry.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry_entry(void)
{
	test_tailcall_hierarchy_count("tc_dummy.bpf.o", false, false, true);
}

/* test_tailcall_bpf2bpf_hierarchy_2 checks that the count value of the tail
 * call limit enforcement matches expectations:
 *
 *         subprog_tail0 --tailcall-> classifier_0 -> subprog_tail0
 * entry <
 *         subprog_tail1 --tailcall-> classifier_1 -> subprog_tail1
 */
static void test_tailcall_bpf2bpf_hierarchy_2(void)
{
	RUN_TESTS(tailcall_bpf2bpf_hierarchy2);
}

/* test_tailcall_bpf2bpf_hierarchy_3 checks that the count value of the tail
 * call limit enforcement matches expectations:
 *
 *                                   subprog with jmp_table0 to classifier_0
 * entry --tailcall-> classifier_0 <
 *                                   subprog with jmp_table1 to classifier_0
 */
static void test_tailcall_bpf2bpf_hierarchy_3(void)
{
	RUN_TESTS(tailcall_bpf2bpf_hierarchy3);
}

/* test_tailcall_freplace checks that updating the prog_array map with the
 * freplace prog fails, whether or not the freplace prog is attached to its
 * target.
 */
static void test_tailcall_freplace(void)
{
	struct tailcall_freplace *freplace_skel = NULL;
	struct bpf_link *freplace_link = NULL;
	struct bpf_program *freplace_prog;
	struct tc_bpf2bpf *tc_skel = NULL;
	int prog_fd, tc_prog_fd, map_fd;
	char buff[128] = {};
	int err, key;

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	freplace_skel = tailcall_freplace__open();
	if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
		return;

	tc_skel = tc_bpf2bpf__open_and_load();
	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
		goto out;

	tc_prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
	freplace_prog = freplace_skel->progs.entry_freplace;
	err = bpf_program__set_attach_target(freplace_prog, tc_prog_fd,
					     "subprog_tc");
	if (!ASSERT_OK(err, "set_attach_target"))
		goto out;

	err = tailcall_freplace__load(freplace_skel);
	if (!ASSERT_OK(err, "tailcall_freplace__load"))
		goto out;

	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
	prog_fd = bpf_program__fd(freplace_prog);
	key = 0;
	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	ASSERT_ERR(err, "update jmp_table failure");

	freplace_link = bpf_program__attach_freplace(freplace_prog, tc_prog_fd,
						     "subprog_tc");
	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
		goto out;

	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	ASSERT_ERR(err, "update jmp_table failure");

out:
	bpf_link__destroy(freplace_link);
	tailcall_freplace__destroy(freplace_skel);
	tc_bpf2bpf__destroy(tc_skel);
}

/* test_tailcall_bpf2bpf_freplace checks that attaching an freplace prog to a
 * prog that sits in a prog_array map fails, and that updating an extended
 * prog into a prog_array map fails.
 */
static void test_tailcall_bpf2bpf_freplace(void)
{
	struct tailcall_freplace *freplace_skel = NULL;
	struct bpf_link *freplace_link = NULL;
	struct tc_bpf2bpf *tc_skel = NULL;
	char buff[128] = {};
	int prog_fd, map_fd;
	int err, key;

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	tc_skel = tc_bpf2bpf__open_and_load();
	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
		goto out;

	prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
	freplace_skel = tailcall_freplace__open();
	if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
		goto out;

	err = bpf_program__set_attach_target(freplace_skel->progs.entry_freplace,
					     prog_fd, "subprog_tc");
	if (!ASSERT_OK(err, "set_attach_target"))
		goto out;

	err = tailcall_freplace__load(freplace_skel);
	if (!ASSERT_OK(err, "tailcall_freplace__load"))
		goto out;

	/* OK to attach then detach freplace prog. */

	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
						     prog_fd, "subprog_tc");
	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
		goto out;

	err = bpf_link__destroy(freplace_link);
	if (!ASSERT_OK(err, "destroy link"))
		goto out;

	/* OK to update prog_array map then delete element from the map. */

	key = 0;
	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	err = bpf_map_delete_elem(map_fd, &key);
	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
		goto out;

	/* Fail to attach a tail callee prog with freplace prog. */

	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
						     prog_fd, "subprog_tc");
	if (!ASSERT_ERR_PTR(freplace_link, "attach_freplace failure"))
		goto out;

	err = bpf_map_delete_elem(map_fd, &key);
	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
		goto out;

	/* Fail to update an extended prog to prog_array map. */

	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
						     prog_fd, "subprog_tc");
	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
		goto out;

	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	if (!ASSERT_ERR(err, "update jmp_table failure"))
		goto out;

out:
	bpf_link__destroy(freplace_link);
	tailcall_freplace__destroy(freplace_skel);
	tc_bpf2bpf__destroy(tc_skel);
}

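/* test_tailcall_failure runs the expected-to-fail programs from the
 * tailcall_fail skeleton via RUN_TESTS.
 */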
static void test_tailcall_failure(void)
{
	RUN_TESTS(tailcall_fail);
}

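/* Top-level runner: registers the scenarios above as subtests; the
 * hierarchy_2/3 RUN_TESTS cases handle their own subtest registration.
 */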
void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
	if (test__start_subtest("tailcall_bpf2bpf_fentry"))
		test_tailcall_bpf2bpf_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_fexit"))
		test_tailcall_bpf2bpf_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit"))
		test_tailcall_bpf2bpf_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
		test_tailcall_bpf2bpf_fentry_entry();
	if (test__start_subtest("tailcall_poke"))
		test_tailcall_poke();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_1"))
		test_tailcall_bpf2bpf_hierarchy_1();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry"))
		test_tailcall_bpf2bpf_hierarchy_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fexit"))
		test_tailcall_bpf2bpf_hierarchy_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_fexit"))
		test_tailcall_bpf2bpf_hierarchy_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_entry"))
		test_tailcall_bpf2bpf_hierarchy_fentry_entry();
	test_tailcall_bpf2bpf_hierarchy_2();
	test_tailcall_bpf2bpf_hierarchy_3();
	if (test__start_subtest("tailcall_freplace"))
		test_tailcall_freplace();
	if (test__start_subtest("tailcall_bpf2bpf_freplace"))
		test_tailcall_bpf2bpf_freplace();
	if (test__start_subtest("tailcall_failure"))
		test_tailcall_failure();
}