xref: /linux/tools/testing/selftests/bpf/prog_tests/tailcalls.c (revision 6f7e6393d1ce636bb7ec77a7fe7b77458fddf701)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <unistd.h>
3 #include <test_progs.h>
4 #include <network_helpers.h>
5 #include "tailcall_poke.skel.h"
6 #include "tailcall_bpf2bpf_hierarchy2.skel.h"
7 #include "tailcall_bpf2bpf_hierarchy3.skel.h"
8 #include "tailcall_freplace.skel.h"
9 #include "tc_bpf2bpf.skel.h"
10 #include "tailcall_fail.skel.h"
11 #include "tailcall_sleepable.skel.h"
12 
13 /* test_tailcall_1 checks basic functionality by patching multiple locations
14  * in a single program for a single tail call slot with nop->jmp, jmp->nop
15  * and jmp->jmp rewrites. Also checks for nop->nop.
16  */
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};	/* dummy packet payload for test runs */
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	/* "entry" is the dispatcher program that issues the tail calls */
	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* Fill slot i with classifier_i: nop -> jmp rewrite per slot. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Slot i yields retval i; delete each slot after use (jmp -> nop). */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Table is empty now: all tail calls miss and entry falls through
	 * with return value 3.
	 */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	/* Refill all slots in order again (nop -> jmp). */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* With all slots present, retval 0 matches classifier_0's return. */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* Overwrite slot i with classifier_(max-1-i): jmp -> jmp rewrite. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Slot i now yields the mirrored value; delete as we go. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, j, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Empty again: fall-through return value is 3. */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	/* nop -> nop: deleting from an already-empty table must fail with
	 * ENOENT while the fall-through return value stays intact.
	 */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}

out:
	bpf_object__close(obj);
}
148 
149 /* test_tailcall_2 checks that patching multiple programs for a single
150  * tail call slot works. It also jumps through several programs and tests
151  * the tail call limit counter.
152  */
153 static void test_tailcall_2(void)
154 {
155 	int err, map_fd, prog_fd, main_fd, i;
156 	struct bpf_map *prog_array;
157 	struct bpf_program *prog;
158 	struct bpf_object *obj;
159 	char prog_name[32];
160 	char buff[128] = {};
161 	LIBBPF_OPTS(bpf_test_run_opts, topts,
162 		.data_in = buff,
163 		.data_size_in = sizeof(buff),
164 		.repeat = 1,
165 	);
166 
167 	err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
168 				 &prog_fd);
169 	if (CHECK_FAIL(err))
170 		return;
171 
172 	prog = bpf_object__find_program_by_name(obj, "entry");
173 	if (CHECK_FAIL(!prog))
174 		goto out;
175 
176 	main_fd = bpf_program__fd(prog);
177 	if (CHECK_FAIL(main_fd < 0))
178 		goto out;
179 
180 	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
181 	if (CHECK_FAIL(!prog_array))
182 		goto out;
183 
184 	map_fd = bpf_map__fd(prog_array);
185 	if (CHECK_FAIL(map_fd < 0))
186 		goto out;
187 
188 	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
189 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
190 
191 		prog = bpf_object__find_program_by_name(obj, prog_name);
192 		if (CHECK_FAIL(!prog))
193 			goto out;
194 
195 		prog_fd = bpf_program__fd(prog);
196 		if (CHECK_FAIL(prog_fd < 0))
197 			goto out;
198 
199 		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
200 		if (CHECK_FAIL(err))
201 			goto out;
202 	}
203 
204 	err = bpf_prog_test_run_opts(main_fd, &topts);
205 	ASSERT_OK(err, "tailcall");
206 	ASSERT_EQ(topts.retval, 2, "tailcall retval");
207 
208 	i = 2;
209 	err = bpf_map_delete_elem(map_fd, &i);
210 	if (CHECK_FAIL(err))
211 		goto out;
212 
213 	err = bpf_prog_test_run_opts(main_fd, &topts);
214 	ASSERT_OK(err, "tailcall");
215 	ASSERT_EQ(topts.retval, 1, "tailcall retval");
216 
217 	i = 0;
218 	err = bpf_map_delete_elem(map_fd, &i);
219 	if (CHECK_FAIL(err))
220 		goto out;
221 
222 	err = bpf_prog_test_run_opts(main_fd, &topts);
223 	ASSERT_OK(err, "tailcall");
224 	ASSERT_EQ(topts.retval, 3, "tailcall retval");
225 out:
226 	bpf_object__close(obj);
227 }
228 
/* Common helper: load @which, wire classifier_0 into slot 0 of its
 * jmp_table, and verify the tail call limit enforcement — the program's
 * global counter in tailcall.bss must reach 33. When @test_fentry and/or
 * @test_fexit are set, additionally attach fentry/fexit tracers to
 * subprog_tail of classifier_0 and verify their counters also reach 33,
 * i.e. tracing does not perturb the tail call counter.
 */
static void test_tailcall_count(const char *which, bool test_fentry,
				bool test_fexit)
{
	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	/* Slot 0 is the target the test program tail-calls repeatedly
	 * until the tail call limit is exhausted.
	 */
	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	if (test_fentry) {
		/* Attach an fentry tracer to subprog_tail of classifier_0
		 * (prog_fd still refers to classifier_0 here).
		 */
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	if (test_fexit) {
		/* Same as above but tracing subprog_tail's exit. */
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* The test program counts its tail calls in a global in .bss;
	 * the limit must have allowed exactly 33 of them.
	 */
	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	if (test_fentry) {
		/* The fentry tracer must have fired once per tail call. */
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 33, "fentry count");
	}

	if (test_fexit) {
		/* Likewise for the fexit tracer. */
		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fexit.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fexit.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 33, "fexit count");
	}

	/* Empty the slot: the tail call now misses and entry returns 0. */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	/* bpf_link__destroy()/bpf_object__close() tolerate NULL, so the
	 * unconditional teardown is safe on every error path.
	 */
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(fexit_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(fexit_obj);
	bpf_object__close(obj);
}
392 
393 /* test_tailcall_3 checks that the count value of the tail call limit
394  * enforcement matches with expectations. JIT uses direct jump.
395  */
static void test_tailcall_3(void)
{
	/* No fentry/fexit tracing; see test_tailcall_count() for details. */
	test_tailcall_count("tailcall3.bpf.o", false, false);
}
400 
401 /* test_tailcall_6 checks that the count value of the tail call limit
402  * enforcement matches with expectations. JIT uses indirect jump.
403  */
static void test_tailcall_6(void)
{
	/* No fentry/fexit tracing; see test_tailcall_count() for details. */
	test_tailcall_count("tailcall6.bpf.o", false, false);
}
408 
409 /* test_tailcall_4 checks that the kernel properly selects indirect jump
410  * for the case where the key is not known. Latter is passed via global
411  * data to select different targets we can compare return value of.
412  */
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;	/* index of the key variable in .bss */
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* The tail call key is read by the program from this global data
	 * section, so the kernel cannot know it at verification time.
	 */
	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	/* Populate slot i with classifier_i. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Select key i via global data; classifier_i must run (retval i). */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	/* Same key, but the target slot is deleted: the tail call misses
	 * and entry falls through with return value 3.
	 */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}
499 
500 /* test_tailcall_5 probes similarly to test_tailcall_4 that the kernel generates
501  * an indirect jump when the keys are const but different from different branches.
502  */
static void test_tailcall_5(void)
{
	/* key[] holds the constant values the BPF program maps back to
	 * jmp_table indices 0..2 on different branches.
	 */
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;	/* index of the key variable in .bss */
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	/* Populate slot i with classifier_i. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Feed key[i] via global data; classifier_i must run (retval i). */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	/* Same key, deleted target: tail call misses, fall-through is 3. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}
589 
590 /* test_tailcall_bpf2bpf_1 purpose is to make sure that tailcalls are working
591  * correctly in correlation with BPF subprograms
592  */
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,	/* real IPv4 packet so ctx accesses work */
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* With all slots populated the run must end with retval 1. */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	/* Missing slot 1: the subprog's tail call misses, retval 0. */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* make sure that subprog can access ctx and entry prog that
	 * called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	/* retval of twice the packet size proves the subprog read ctx. */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}
672 
673 /* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
674  * enforcement matches with expectations when tailcall is preceded with
675  * bpf2bpf call.
676  */
677 static void test_tailcall_bpf2bpf_2(void)
678 {
679 	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
680 	struct bpf_map *prog_array, *data_map;
681 	struct bpf_program *prog;
682 	struct bpf_object *obj;
683 	char buff[128] = {};
684 	LIBBPF_OPTS(bpf_test_run_opts, topts,
685 		.data_in = buff,
686 		.data_size_in = sizeof(buff),
687 		.repeat = 1,
688 	);
689 
690 	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
691 				 &obj, &prog_fd);
692 	if (CHECK_FAIL(err))
693 		return;
694 
695 	prog = bpf_object__find_program_by_name(obj, "entry");
696 	if (CHECK_FAIL(!prog))
697 		goto out;
698 
699 	main_fd = bpf_program__fd(prog);
700 	if (CHECK_FAIL(main_fd < 0))
701 		goto out;
702 
703 	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
704 	if (CHECK_FAIL(!prog_array))
705 		goto out;
706 
707 	map_fd = bpf_map__fd(prog_array);
708 	if (CHECK_FAIL(map_fd < 0))
709 		goto out;
710 
711 	prog = bpf_object__find_program_by_name(obj, "classifier_0");
712 	if (CHECK_FAIL(!prog))
713 		goto out;
714 
715 	prog_fd = bpf_program__fd(prog);
716 	if (CHECK_FAIL(prog_fd < 0))
717 		goto out;
718 
719 	i = 0;
720 	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
721 	if (CHECK_FAIL(err))
722 		goto out;
723 
724 	err = bpf_prog_test_run_opts(main_fd, &topts);
725 	ASSERT_OK(err, "tailcall");
726 	ASSERT_EQ(topts.retval, 1, "tailcall retval");
727 
728 	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
729 	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
730 		goto out;
731 
732 	data_fd = bpf_map__fd(data_map);
733 	if (CHECK_FAIL(data_fd < 0))
734 		goto out;
735 
736 	i = 0;
737 	err = bpf_map_lookup_elem(data_fd, &i, &val);
738 	ASSERT_OK(err, "tailcall count");
739 	ASSERT_EQ(val, 33, "tailcall count");
740 
741 	i = 0;
742 	err = bpf_map_delete_elem(map_fd, &i);
743 	if (CHECK_FAIL(err))
744 		goto out;
745 
746 	err = bpf_prog_test_run_opts(main_fd, &topts);
747 	ASSERT_OK(err, "tailcall");
748 	ASSERT_OK(topts.retval, "tailcall retval");
749 out:
750 	bpf_object__close(obj);
751 }
752 
753 /* test_tailcall_bpf2bpf_3 checks that non-trivial amount of stack (up to
754  * 256 bytes) can be used within bpf subprograms that have the tailcalls
755  * in them
756  */
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* Populate slot i with classifier_i. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Full chain accumulates three times the packet length. */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	/* Without slot 1 only one hop contributes. */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	/* Without slot 0 a different path yields twice the length. */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}
831 
832 #include "tailcall_bpf2bpf4.skel.h"
833 
834 /* test_tailcall_bpf2bpf_4 checks that tailcall counter is correctly preserved
835  * across tailcalls combined with bpf2bpf calls. for making sure that tailcall
836  * counter behaves correctly, bpf program will go through following flow:
837  *
838  * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
839  * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
840  * subprog2 [here bump global counter] --------^
841  *
842  * We go through first two tailcalls and start counting from the subprog2 where
843  * the loop begins. At the end of the test make sure that the global counter is
844  * equal to 31, because tailcall counter includes the first two tailcalls
845  * whereas global counter is incremented only on loop presented on flow above.
846  *
847  * The noise parameter is used to insert bpf_map_update calls into the logic
848  * to force verifier to patch instructions. This allows us to ensure jump
849  * logic remains correct with instruction movement.
850  */
static void test_tailcall_bpf2bpf_4(bool noise)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct tailcall_bpf2bpf4__bss val;	/* skeleton view of the program's .bss */
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* Populate slot i with classifier_i. */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	/* Arm/disarm the noise map updates and reset the loop counter
	 * before running (see the comment above this function).
	 */
	i = 0;
	val.noise = noise;
	val.count = 0;
	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	/* 31 = tail call limit (33) minus the two leading tail calls that
	 * happen before the counting loop is entered.
	 */
	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val.count, 31, "tailcall count");

out:
	bpf_object__close(obj);
}
929 
930 #include "tailcall_bpf2bpf6.skel.h"
931 
932 /* Tail call counting works even when there is data on stack which is
933  * not aligned to 8 bytes.
934  */
935 static void test_tailcall_bpf2bpf_6(void)
936 {
937 	struct tailcall_bpf2bpf6 *obj;
938 	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
939 	LIBBPF_OPTS(bpf_test_run_opts, topts,
940 		.data_in = &pkt_v4,
941 		.data_size_in = sizeof(pkt_v4),
942 		.repeat = 1,
943 	);
944 
945 	obj = tailcall_bpf2bpf6__open_and_load();
946 	if (!ASSERT_OK_PTR(obj, "open and load"))
947 		return;
948 
949 	main_fd = bpf_program__fd(obj->progs.entry);
950 	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
951 		goto out;
952 
953 	map_fd = bpf_map__fd(obj->maps.jmp_table);
954 	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
955 		goto out;
956 
957 	prog_fd = bpf_program__fd(obj->progs.classifier_0);
958 	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
959 		goto out;
960 
961 	i = 0;
962 	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
963 	if (!ASSERT_OK(err, "jmp_table map update"))
964 		goto out;
965 
966 	err = bpf_prog_test_run_opts(main_fd, &topts);
967 	ASSERT_OK(err, "entry prog test run");
968 	ASSERT_EQ(topts.retval, 0, "tailcall retval");
969 
970 	data_fd = bpf_map__fd(obj->maps.bss);
971 	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
972 		goto out;
973 
974 	i = 0;
975 	err = bpf_map_lookup_elem(data_fd, &i, &val);
976 	ASSERT_OK(err, "bss map lookup");
977 	ASSERT_EQ(val, 1, "done flag is set");
978 
979 out:
980 	tailcall_bpf2bpf6__destroy(obj);
981 }
982 
983 /* test_tailcall_bpf2bpf_fentry checks that the count value of the tail call
984  * limit enforcement matches with expectations when tailcall is preceded with
985  * bpf2bpf call, and the bpf2bpf call is traced by fentry.
986  */
static void test_tailcall_bpf2bpf_fentry(void)
{
	/* fentry tracing only; see test_tailcall_count() for details. */
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false);
}
991 
992 /* test_tailcall_bpf2bpf_fexit checks that the count value of the tail call
993  * limit enforcement matches with expectations when tailcall is preceded with
994  * bpf2bpf call, and the bpf2bpf call is traced by fexit.
995  */
static void test_tailcall_bpf2bpf_fexit(void)
{
	/* fexit tracing only; see test_tailcall_count() for details. */
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true);
}
1000 
1001 /* test_tailcall_bpf2bpf_fentry_fexit checks that the count value of the tail
1002  * call limit enforcement matches with expectations when tailcall is preceded
1003  * with bpf2bpf call, and the bpf2bpf call is traced by both fentry and fexit.
1004  */
static void test_tailcall_bpf2bpf_fentry_fexit(void)
{
	/* Both fentry and fexit tracing; see test_tailcall_count(). */
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true);
}
1009 
/* test_tailcall_bpf2bpf_fentry_entry checks that the count value of the tail
 * call limit enforcement matches with expectations when tailcall is preceded
 * with bpf2bpf call, and the bpf2bpf caller is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry_entry(void)
{
	struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL;
	int err, map_fd, prog_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_link *fentry_link = NULL;
	struct bpf_program *prog;
	char buff[128] = {};

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o",
				 BPF_PROG_TYPE_SCHED_CLS,
				 &tgt_obj, &prog_fd);
	if (!ASSERT_OK(err, "load tgt_obj"))
		return;

	prog_array = bpf_object__find_map_by_name(tgt_obj, "jmp_table");
	if (!ASSERT_OK_PTR(prog_array, "find jmp_table map"))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (!ASSERT_FALSE(map_fd < 0, "find jmp_table map fd"))
		goto out;

	/* classifier_0 serves three roles below: jmp_table[0] target,
	 * fentry attach target, and the program run via test_run.
	 */
	prog = bpf_object__find_program_by_name(tgt_obj, "classifier_0");
	if (!ASSERT_OK_PTR(prog, "find classifier_0 prog"))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_FALSE(prog_fd < 0, "find classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
					   NULL);
	if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
		goto out;

	prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
	if (!ASSERT_OK_PTR(prog, "find fentry prog"))
		goto out;

	/* Retarget the fentry prog at classifier_0 before loading. */
	err = bpf_program__set_attach_target(prog, prog_fd, "classifier_0");
	if (!ASSERT_OK(err, "set_attach_target classifier_0"))
		goto out;

	err = bpf_object__load(fentry_obj);
	if (!ASSERT_OK(err, "load fentry_obj"))
		goto out;

	fentry_link = bpf_program__attach_trace(prog);
	if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
		goto out;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* The tail call counter lives in the target object's tailcall.bss. */
	data_map = bpf_object__find_map_by_name(tgt_obj, "tailcall.bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0, "find tailcall.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 34, "tailcall count");

	/* The fentry prog's hit counter lives in its own .bss. */
	data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall_bpf2bpf_fentry.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0,
			  "find tailcall_bpf2bpf_fentry.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "fentry count");
	ASSERT_EQ(val, 1, "fentry count");

out:
	bpf_link__destroy(fentry_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(tgt_obj);
}
1115 
1116 #define JMP_TABLE "/sys/fs/bpf/jmp_table"
1117 
1118 static int poke_thread_exit;
1119 
1120 static void *poke_update(void *arg)
1121 {
1122 	__u32 zero = 0, prog1_fd, prog2_fd, map_fd;
1123 	struct tailcall_poke *call = arg;
1124 
1125 	map_fd = bpf_map__fd(call->maps.jmp_table);
1126 	prog1_fd = bpf_program__fd(call->progs.call1);
1127 	prog2_fd = bpf_program__fd(call->progs.call2);
1128 
1129 	while (!poke_thread_exit) {
1130 		bpf_map_update_elem(map_fd, &zero, &prog1_fd, BPF_ANY);
1131 		bpf_map_update_elem(map_fd, &zero, &prog2_fd, BPF_ANY);
1132 	}
1133 
1134 	return NULL;
1135 }
1136 
1137 /*
1138  * We are trying to hit prog array update during another program load
1139  * that shares the same prog array map.
1140  *
1141  * For that we share the jmp_table map between two skeleton instances
1142  * by pinning the jmp_table to same path. Then first skeleton instance
1143  * periodically updates jmp_table in 'poke update' thread while we load
1144  * the second skeleton instance in the main thread.
1145  */
1146 static void test_tailcall_poke(void)
1147 {
1148 	struct tailcall_poke *call, *test;
1149 	int err, cnt = 10;
1150 	pthread_t thread;
1151 
1152 	unlink(JMP_TABLE);
1153 
1154 	call = tailcall_poke__open_and_load();
1155 	if (!ASSERT_OK_PTR(call, "tailcall_poke__open"))
1156 		return;
1157 
1158 	err = bpf_map__pin(call->maps.jmp_table, JMP_TABLE);
1159 	if (!ASSERT_OK(err, "bpf_map__pin"))
1160 		goto out;
1161 
1162 	err = pthread_create(&thread, NULL, poke_update, call);
1163 	if (!ASSERT_OK(err, "new toggler"))
1164 		goto out;
1165 
1166 	while (cnt--) {
1167 		test = tailcall_poke__open();
1168 		if (!ASSERT_OK_PTR(test, "tailcall_poke__open"))
1169 			break;
1170 
1171 		err = bpf_map__set_pin_path(test->maps.jmp_table, JMP_TABLE);
1172 		if (!ASSERT_OK(err, "bpf_map__pin")) {
1173 			tailcall_poke__destroy(test);
1174 			break;
1175 		}
1176 
1177 		bpf_program__set_autoload(test->progs.test, true);
1178 		bpf_program__set_autoload(test->progs.call1, false);
1179 		bpf_program__set_autoload(test->progs.call2, false);
1180 
1181 		err = tailcall_poke__load(test);
1182 		tailcall_poke__destroy(test);
1183 		if (!ASSERT_OK(err, "tailcall_poke__load"))
1184 			break;
1185 	}
1186 
1187 	poke_thread_exit = 1;
1188 	ASSERT_OK(pthread_join(thread, NULL), "pthread_join");
1189 
1190 out:
1191 	bpf_map__unpin(call->maps.jmp_table, JMP_TABLE);
1192 	tailcall_poke__destroy(call);
1193 }
1194 
1195 static void test_tailcall_hierarchy_count(const char *which, bool test_fentry,
1196 					  bool test_fexit,
1197 					  bool test_fentry_entry)
1198 {
1199 	int err, map_fd, prog_fd, main_data_fd, fentry_data_fd = 0, fexit_data_fd = 0, i, val;
1200 	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
1201 	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
1202 	struct bpf_program *prog, *fentry_prog;
1203 	struct bpf_map *prog_array, *data_map;
1204 	int fentry_prog_fd;
1205 	char buff[128] = {};
1206 
1207 	LIBBPF_OPTS(bpf_test_run_opts, topts,
1208 		.data_in = buff,
1209 		.data_size_in = sizeof(buff),
1210 		.repeat = 1,
1211 	);
1212 
1213 	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
1214 				 &prog_fd);
1215 	if (!ASSERT_OK(err, "load obj"))
1216 		return;
1217 
1218 	prog = bpf_object__find_program_by_name(obj, "entry");
1219 	if (!ASSERT_OK_PTR(prog, "find entry prog"))
1220 		goto out;
1221 
1222 	prog_fd = bpf_program__fd(prog);
1223 	if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
1224 		goto out;
1225 
1226 	if (test_fentry_entry) {
1227 		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_hierarchy_fentry.bpf.o",
1228 						   NULL);
1229 		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
1230 			goto out;
1231 
1232 		fentry_prog = bpf_object__find_program_by_name(fentry_obj,
1233 							       "fentry");
1234 		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
1235 			goto out;
1236 
1237 		err = bpf_program__set_attach_target(fentry_prog, prog_fd,
1238 						     "entry");
1239 		if (!ASSERT_OK(err, "set_attach_target entry"))
1240 			goto out;
1241 
1242 		err = bpf_object__load(fentry_obj);
1243 		if (!ASSERT_OK(err, "load fentry_obj"))
1244 			goto out;
1245 
1246 		fentry_link = bpf_program__attach_trace(fentry_prog);
1247 		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
1248 			goto out;
1249 
1250 		fentry_prog_fd = bpf_program__fd(fentry_prog);
1251 		if (!ASSERT_GE(fentry_prog_fd, 0, "fentry_prog_fd"))
1252 			goto out;
1253 
1254 		prog_array = bpf_object__find_map_by_name(fentry_obj, "jmp_table");
1255 		if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
1256 			goto out;
1257 
1258 		map_fd = bpf_map__fd(prog_array);
1259 		if (!ASSERT_GE(map_fd, 0, "map_fd"))
1260 			goto out;
1261 
1262 		i = 0;
1263 		err = bpf_map_update_elem(map_fd, &i, &fentry_prog_fd, BPF_ANY);
1264 		if (!ASSERT_OK(err, "update jmp_table"))
1265 			goto out;
1266 
1267 		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
1268 		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
1269 				  "find data_map"))
1270 			goto out;
1271 
1272 	} else {
1273 		prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
1274 		if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
1275 			goto out;
1276 
1277 		map_fd = bpf_map__fd(prog_array);
1278 		if (!ASSERT_GE(map_fd, 0, "map_fd"))
1279 			goto out;
1280 
1281 		i = 0;
1282 		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
1283 		if (!ASSERT_OK(err, "update jmp_table"))
1284 			goto out;
1285 
1286 		data_map = bpf_object__find_map_by_name(obj, ".bss");
1287 		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
1288 				  "find data_map"))
1289 			goto out;
1290 	}
1291 
1292 	if (test_fentry) {
1293 		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
1294 						   NULL);
1295 		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
1296 			goto out;
1297 
1298 		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
1299 		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
1300 			goto out;
1301 
1302 		err = bpf_program__set_attach_target(prog, prog_fd,
1303 						     "subprog_tail");
1304 		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
1305 			goto out;
1306 
1307 		err = bpf_object__load(fentry_obj);
1308 		if (!ASSERT_OK(err, "load fentry_obj"))
1309 			goto out;
1310 
1311 		fentry_link = bpf_program__attach_trace(prog);
1312 		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
1313 			goto out;
1314 	}
1315 
1316 	if (test_fexit) {
1317 		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
1318 						  NULL);
1319 		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
1320 			goto out;
1321 
1322 		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
1323 		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
1324 			goto out;
1325 
1326 		err = bpf_program__set_attach_target(prog, prog_fd,
1327 						     "subprog_tail");
1328 		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
1329 			goto out;
1330 
1331 		err = bpf_object__load(fexit_obj);
1332 		if (!ASSERT_OK(err, "load fexit_obj"))
1333 			goto out;
1334 
1335 		fexit_link = bpf_program__attach_trace(prog);
1336 		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
1337 			goto out;
1338 	}
1339 
1340 	err = bpf_prog_test_run_opts(prog_fd, &topts);
1341 	ASSERT_OK(err, "tailcall");
1342 	ASSERT_EQ(topts.retval, 1, "tailcall retval");
1343 
1344 	main_data_fd = bpf_map__fd(data_map);
1345 	if (!ASSERT_GE(main_data_fd, 0, "main_data_fd"))
1346 		goto out;
1347 
1348 	i = 0;
1349 	err = bpf_map_lookup_elem(main_data_fd, &i, &val);
1350 	ASSERT_OK(err, "tailcall count");
1351 	ASSERT_EQ(val, 34, "tailcall count");
1352 
1353 	if (test_fentry) {
1354 		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
1355 		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
1356 				  "find tailcall_bpf2bpf_fentry.bss map"))
1357 			goto out;
1358 
1359 		fentry_data_fd = bpf_map__fd(data_map);
1360 		if (!ASSERT_GE(fentry_data_fd, 0,
1361 				  "find tailcall_bpf2bpf_fentry.bss map fd"))
1362 			goto out;
1363 
1364 		i = 0;
1365 		err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
1366 		ASSERT_OK(err, "fentry count");
1367 		ASSERT_EQ(val, 68, "fentry count");
1368 	}
1369 
1370 	if (test_fexit) {
1371 		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
1372 		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
1373 				  "find tailcall_bpf2bpf_fexit.bss map"))
1374 			goto out;
1375 
1376 		fexit_data_fd = bpf_map__fd(data_map);
1377 		if (!ASSERT_GE(fexit_data_fd, 0,
1378 				  "find tailcall_bpf2bpf_fexit.bss map fd"))
1379 			goto out;
1380 
1381 		i = 0;
1382 		err = bpf_map_lookup_elem(fexit_data_fd, &i, &val);
1383 		ASSERT_OK(err, "fexit count");
1384 		ASSERT_EQ(val, 68, "fexit count");
1385 	}
1386 
1387 	i = 0;
1388 	err = bpf_map_delete_elem(map_fd, &i);
1389 	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
1390 		goto out;
1391 
1392 	err = bpf_prog_test_run_opts(prog_fd, &topts);
1393 	ASSERT_OK(err, "tailcall");
1394 	ASSERT_EQ(topts.retval, 1, "tailcall retval");
1395 
1396 	i = 0;
1397 	err = bpf_map_lookup_elem(main_data_fd, &i, &val);
1398 	ASSERT_OK(err, "tailcall count");
1399 	ASSERT_EQ(val, 35, "tailcall count");
1400 
1401 	if (test_fentry) {
1402 		i = 0;
1403 		err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
1404 		ASSERT_OK(err, "fentry count");
1405 		ASSERT_EQ(val, 70, "fentry count");
1406 	}
1407 
1408 	if (test_fexit) {
1409 		i = 0;
1410 		err = bpf_map_lookup_elem(fexit_data_fd, &i, &val);
1411 		ASSERT_OK(err, "fexit count");
1412 		ASSERT_EQ(val, 70, "fexit count");
1413 	}
1414 
1415 out:
1416 	bpf_link__destroy(fentry_link);
1417 	bpf_link__destroy(fexit_link);
1418 	bpf_object__close(fentry_obj);
1419 	bpf_object__close(fexit_obj);
1420 	bpf_object__close(obj);
1421 }
1422 
/* test_tailcall_bpf2bpf_hierarchy_1 checks that the count value of the tail
 * call limit enforcement matches with expectations when tailcalls are preceded
 * with two bpf2bpf calls.
 *
 *         subprog --tailcall-> entry
 * entry <
 *         subprog --tailcall-> entry
 */
static void test_tailcall_bpf2bpf_hierarchy_1(void)
{
	/* test_fentry, test_fexit and test_fentry_entry all disabled */
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      false, false, false);
}
1436 
/* test_tailcall_bpf2bpf_hierarchy_fentry checks that the count value of the
 * tail call limit enforcement matches with expectations when tailcalls are
 * preceded with two bpf2bpf calls, and the two subprogs are traced by fentry.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry(void)
{
	/* test_fentry enabled; test_fexit and test_fentry_entry disabled */
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      true, false, false);
}
1446 
/* test_tailcall_bpf2bpf_hierarchy_fexit checks that the count value of the tail
 * call limit enforcement matches with expectations when tailcalls are preceded
 * with two bpf2bpf calls, and the two subprogs are traced by fexit.
 */
static void test_tailcall_bpf2bpf_hierarchy_fexit(void)
{
	/* test_fexit enabled; test_fentry and test_fentry_entry disabled */
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      false, true, false);
}
1456 
/* test_tailcall_bpf2bpf_hierarchy_fentry_fexit checks that the count value of
 * the tail call limit enforcement matches with expectations when tailcalls are
 * preceded with two bpf2bpf calls, and the two subprogs are traced by both
 * fentry and fexit.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry_fexit(void)
{
	/* test_fentry and test_fexit enabled; test_fentry_entry disabled */
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      true, true, false);
}
1467 
/* test_tailcall_bpf2bpf_hierarchy_fentry_entry checks that the count value of
 * the tail call limit enforcement matches with expectations when tailcalls are
 * preceded with two bpf2bpf calls in fentry.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry_entry(void)
{
	/* only test_fentry_entry enabled: the fentry prog traces "entry"
	 * and becomes the tail call target itself
	 */
	test_tailcall_hierarchy_count("tc_dummy.bpf.o", false, false, true);
}
1476 
/* test_tailcall_bpf2bpf_hierarchy_2 checks that the count value of the tail
 * call limit enforcement matches with expectations:
 *
 *         subprog_tail0 --tailcall-> classifier_0 -> subprog_tail0
 * entry <
 *         subprog_tail1 --tailcall-> classifier_1 -> subprog_tail1
 */
static void test_tailcall_bpf2bpf_hierarchy_2(void)
{
	/* expectations are encoded in the skeleton's BPF source;
	 * RUN_TESTS loads and drives its programs
	 */
	RUN_TESTS(tailcall_bpf2bpf_hierarchy2);
}
1488 
/* test_tailcall_bpf2bpf_hierarchy_3 checks that the count value of the tail
 * call limit enforcement matches with expectations:
 *
 *                                   subprog with jmp_table0 to classifier_0
 * entry --tailcall-> classifier_0 <
 *                                   subprog with jmp_table1 to classifier_0
 */
static void test_tailcall_bpf2bpf_hierarchy_3(void)
{
	/* expectations are encoded in the skeleton's BPF source;
	 * RUN_TESTS loads and drives its programs
	 */
	RUN_TESTS(tailcall_bpf2bpf_hierarchy3);
}
1500 
1501 /* test_tailcall_freplace checks that the freplace prog fails to update the
1502  * prog_array map, no matter whether the freplace prog attaches to its target.
1503  */
1504 static void test_tailcall_freplace(void)
1505 {
1506 	struct tailcall_freplace *freplace_skel = NULL;
1507 	struct bpf_link *freplace_link = NULL;
1508 	struct bpf_program *freplace_prog;
1509 	struct tc_bpf2bpf *tc_skel = NULL;
1510 	int prog_fd, tc_prog_fd, map_fd;
1511 	char buff[128] = {};
1512 	int err, key;
1513 
1514 	LIBBPF_OPTS(bpf_test_run_opts, topts,
1515 		    .data_in = buff,
1516 		    .data_size_in = sizeof(buff),
1517 		    .repeat = 1,
1518 	);
1519 
1520 	freplace_skel = tailcall_freplace__open();
1521 	if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
1522 		return;
1523 
1524 	tc_skel = tc_bpf2bpf__open_and_load();
1525 	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
1526 		goto out;
1527 
1528 	tc_prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
1529 	freplace_prog = freplace_skel->progs.entry_freplace;
1530 	err = bpf_program__set_attach_target(freplace_prog, tc_prog_fd,
1531 					     "subprog_tc");
1532 	if (!ASSERT_OK(err, "set_attach_target"))
1533 		goto out;
1534 
1535 	err = tailcall_freplace__load(freplace_skel);
1536 	if (!ASSERT_OK(err, "tailcall_freplace__load"))
1537 		goto out;
1538 
1539 	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
1540 	prog_fd = bpf_program__fd(freplace_prog);
1541 	key = 0;
1542 	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
1543 	ASSERT_ERR(err, "update jmp_table failure");
1544 
1545 	freplace_link = bpf_program__attach_freplace(freplace_prog, tc_prog_fd,
1546 						     "subprog_tc");
1547 	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
1548 		goto out;
1549 
1550 	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
1551 	ASSERT_ERR(err, "update jmp_table failure");
1552 
1553 out:
1554 	bpf_link__destroy(freplace_link);
1555 	tailcall_freplace__destroy(freplace_skel);
1556 	tc_bpf2bpf__destroy(tc_skel);
1557 }
1558 
1559 /* test_tailcall_bpf2bpf_freplace checks the failure that fails to attach a tail
1560  * callee prog with freplace prog or fails to update an extended prog to
1561  * prog_array map.
1562  */
1563 static void test_tailcall_bpf2bpf_freplace(void)
1564 {
1565 	struct tailcall_freplace *freplace_skel = NULL;
1566 	struct bpf_link *freplace_link = NULL;
1567 	struct tc_bpf2bpf *tc_skel = NULL;
1568 	char buff[128] = {};
1569 	int prog_fd, map_fd;
1570 	int err, key;
1571 
1572 	LIBBPF_OPTS(bpf_test_run_opts, topts,
1573 		    .data_in = buff,
1574 		    .data_size_in = sizeof(buff),
1575 		    .repeat = 1,
1576 	);
1577 
1578 	tc_skel = tc_bpf2bpf__open_and_load();
1579 	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
1580 		goto out;
1581 
1582 	prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
1583 	freplace_skel = tailcall_freplace__open();
1584 	if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
1585 		goto out;
1586 
1587 	err = bpf_program__set_attach_target(freplace_skel->progs.entry_freplace,
1588 					     prog_fd, "subprog_tc");
1589 	if (!ASSERT_OK(err, "set_attach_target"))
1590 		goto out;
1591 
1592 	err = tailcall_freplace__load(freplace_skel);
1593 	if (!ASSERT_OK(err, "tailcall_freplace__load"))
1594 		goto out;
1595 
1596 	/* OK to attach then detach freplace prog. */
1597 
1598 	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
1599 						     prog_fd, "subprog_tc");
1600 	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
1601 		goto out;
1602 
1603 	err = bpf_link__destroy(freplace_link);
1604 	freplace_link = NULL;
1605 	if (!ASSERT_OK(err, "destroy link"))
1606 		goto out;
1607 
1608 	/* OK to update prog_array map then delete element from the map. */
1609 
1610 	key = 0;
1611 	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
1612 	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
1613 	if (!ASSERT_OK(err, "update jmp_table"))
1614 		goto out;
1615 
1616 	err = bpf_map_delete_elem(map_fd, &key);
1617 	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
1618 		goto out;
1619 
1620 	/* Fail to attach a tail callee prog with freplace prog. */
1621 
1622 	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
1623 	if (!ASSERT_OK(err, "update jmp_table"))
1624 		goto out;
1625 
1626 	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
1627 						     prog_fd, "subprog_tc");
1628 	if (!ASSERT_ERR_PTR(freplace_link, "attach_freplace failure"))
1629 		goto out;
1630 
1631 	err = bpf_map_delete_elem(map_fd, &key);
1632 	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
1633 		goto out;
1634 
1635 	/* Fail to update an extended prog to prog_array map. */
1636 
1637 	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
1638 						     prog_fd, "subprog_tc");
1639 	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
1640 		goto out;
1641 
1642 	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
1643 	if (!ASSERT_ERR(err, "update jmp_table failure"))
1644 		goto out;
1645 
1646 out:
1647 	bpf_link__destroy(freplace_link);
1648 	tailcall_freplace__destroy(freplace_skel);
1649 	tc_bpf2bpf__destroy(tc_skel);
1650 }
1651 
1652 static void test_tailcall_failure()
1653 {
1654 	RUN_TESTS(tailcall_fail);
1655 }
1656 
/* Userspace attach point for the sleepable uprobe test below; noinline and
 * the empty asm keep the compiler from inlining or eliding the call so the
 * uprobe can actually hit it.
 */
noinline void uprobe_sleepable_trigger(void)
{
	asm volatile ("");
}
1661 
/* test_tailcall_sleepable checks that a non-sleepable and a sleepable prog
 * cannot share a tailcall map, and that a sleepable uprobe prog can tail
 * call into another sleepable prog.
 */
static void test_tailcall_sleepable(void)
{
	LIBBPF_OPTS(bpf_uprobe_opts, opts);
	struct tailcall_sleepable *skel;
	int prog_fd, map_fd;
	int err, key;

	skel = tailcall_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "tailcall_sleepable__open"))
		return;

	/*
	 * Test that we can't load uprobe_normal and uprobe_sleepable_1,
	 * because they share tailcall map.
	 */
	bpf_program__set_autoload(skel->progs.uprobe_normal, true);
	bpf_program__set_autoload(skel->progs.uprobe_sleepable_1, true);

	/* load is expected to fail */
	err = tailcall_sleepable__load(skel);
	if (!ASSERT_ERR(err, "tailcall_sleepable__load"))
		goto out;

	/* Start over with a fresh skeleton for the positive case. */
	tailcall_sleepable__destroy(skel);

	/*
	 * Test that we can tail call from sleepable to sleepable program.
	 */
	skel = tailcall_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "tailcall_sleepable__open"))
		return;

	bpf_program__set_autoload(skel->progs.uprobe_sleepable_1, true);
	bpf_program__set_autoload(skel->progs.uprobe_sleepable_2, true);

	err = tailcall_sleepable__load(skel);
	if (!ASSERT_OK(err, "tailcall_sleepable__load"))
		goto out;

	/* Add sleepable uprobe_sleepable_2 to jmp_table[0]. */
	key = 0;
	prog_fd = bpf_program__fd(skel->progs.uprobe_sleepable_2);
	map_fd = bpf_map__fd(skel->maps.jmp_table);
	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	/* let the BPF side filter on our pid */
	skel->bss->my_pid = getpid();

	/* Attach uprobe_sleepable_1 to uprobe_sleepable_trigger and hit it.  */
	opts.func_name = "uprobe_sleepable_trigger";
	skel->links.uprobe_sleepable_1 = bpf_program__attach_uprobe_opts(
						skel->progs.uprobe_sleepable_1,
						-1,
						"/proc/self/exe",
						0 /* offset */,
						&opts);
	if (!ASSERT_OK_PTR(skel->links.uprobe_sleepable_1, "bpf_program__attach_uprobe_opts"))
		goto out;

	uprobe_sleepable_trigger();
	/* set by uprobe_sleepable_2, proving the tail call happened */
	ASSERT_EQ(skel->bss->executed, 1, "executed");

out:
	tailcall_sleepable__destroy(skel);
}
1727 
/* Top-level entry: registers and runs every tailcall subtest. */
void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
	if (test__start_subtest("tailcall_bpf2bpf_fentry"))
		test_tailcall_bpf2bpf_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_fexit"))
		test_tailcall_bpf2bpf_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit"))
		test_tailcall_bpf2bpf_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
		test_tailcall_bpf2bpf_fentry_entry();
	if (test__start_subtest("tailcall_poke"))
		test_tailcall_poke();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_1"))
		test_tailcall_bpf2bpf_hierarchy_1();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry"))
		test_tailcall_bpf2bpf_hierarchy_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fexit"))
		test_tailcall_bpf2bpf_hierarchy_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_fexit"))
		test_tailcall_bpf2bpf_hierarchy_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_entry"))
		test_tailcall_bpf2bpf_hierarchy_fentry_entry();
	/* NOTE(review): called unconditionally — RUN_TESTS appears to handle
	 * subtest registration itself; confirm against test_progs.h.
	 */
	test_tailcall_bpf2bpf_hierarchy_2();
	test_tailcall_bpf2bpf_hierarchy_3();
	if (test__start_subtest("tailcall_freplace"))
		test_tailcall_freplace();
	if (test__start_subtest("tailcall_bpf2bpf_freplace"))
		test_tailcall_bpf2bpf_freplace();
	if (test__start_subtest("tailcall_failure"))
		test_tailcall_failure();
	if (test__start_subtest("tailcall_sleepable"))
		test_tailcall_sleepable();
}
1785