// SPDX-License-Identifier: GPL-2.0
#include <unistd.h>
#include <test_progs.h>
#include <network_helpers.h>
#include "tailcall_poke.skel.h"

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop,
 * and jmp->jmp rewrites. It also checks nop->nop.
 */
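/* The tests below only depend on the layout of the BPF side, not on its
 * exact source. A minimal sketch of what an object like tailcall1.bpf.o is
 * assumed to look like (illustrative, not the verbatim progs/tailcall1.c;
 * the retval pattern asserted below implies one tail call site per slot,
 * classifier_%d returning its own index, and the entry prog returning 3
 * when every slot misses):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 3);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} jmp_table SEC(".maps");
 *
 *	SEC("tc")
 *	int classifier_0(struct __sk_buff *skb)
 *	{
 *		return 0;
 *	}
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		bpf_tail_call_static(skb, &jmp_table, 1);
 *		bpf_tail_call_static(skb, &jmp_table, 2);
 *		return 3;
 *	}
 */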
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, j, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}

out:
	bpf_object__close(obj);
}

/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 2, "tailcall retval");

	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
	bpf_object__close(obj);
}

static void test_tailcall_count(const char *which, bool test_fentry,
				bool test_fexit)
{
	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	if (test_fentry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	if (test_fexit) {
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	if (test_fentry) {
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 33, "fentry count");
	}

	if (test_fexit) {
		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fexit.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fexit.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 33, "fexit count");
	}

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(fexit_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(fexit_obj);
	bpf_object__close(obj);
}

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses a direct jump.
 */
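/* A hedged sketch of the counting pattern test_tailcall_count() relies on
 * (assumed shape, not the verbatim progs/tailcall3.c): classifier_0 bumps a
 * global counter and re-enters itself through the prog array until the
 * kernel refuses the tail call, so the final count equals the limit
 * (MAX_TAIL_CALL_CNT, i.e. the 33 asserted in test_tailcall_count()):
 *
 *	int count;
 *
 *	SEC("tc")
 *	int classifier_0(struct __sk_buff *skb)
 *	{
 *		count++;
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return 1;
 *	}
 */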
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.bpf.o", false, false);
}

/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses an indirect jump.
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.bpf.o", false, false);
}

/* test_tailcall_4 checks that the kernel properly selects an indirect jump
 * for the case where the key is not known at load time. The key is passed
 * via global data to select different targets whose return values we can
 * compare.
 */
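/* What "passed via global data" means in practice (assumed shape,
 * illustrative): the selector lives in the object's .bss, so the verifier
 * cannot prove a constant key and the JIT has to emit an indirect jump:
 *
 *	int selector;
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, selector);
 *		return 3;
 *	}
 */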
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_5 checks, similarly to test_tailcall_4, that the kernel
 * generates an indirect jump when the keys are constant but differ across
 * branches.
 */
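/* Assumed shape of the BPF side (illustrative; the 1111/1234/5678 keys
 * match what this test writes into .bss): each branch uses its own constant
 * key, so no single known key exists and an indirect jump is needed again:
 *
 *	int selector;
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		switch (selector) {
 *		case 1111: bpf_tail_call_static(skb, &jmp_table, 0); break;
 *		case 1234: bpf_tail_call_static(skb, &jmp_table, 1); break;
 *		case 5678: bpf_tail_call_static(skb, &jmp_table, 2); break;
 *		}
 *		return 3;
 *	}
 */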
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 makes sure that tail calls work correctly in
 * combination with BPF subprograms.
 */
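/* The construct under test, roughly (assumed shape, illustrative): the tail
 * call is issued from inside a BPF-to-BPF subprogram rather than from the
 * entry program itself:
 *
 *	static __noinline
 *	int subprog_tail(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return 1;
 *	}
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		return subprog_tail(skb);
 *	}
 */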
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* make sure that the subprog can access the ctx and that the entry
	 * prog that called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call.
 */
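/* Same counting loop as sketched before test_tailcall_3(), except the tail
 * call now sits behind a bpf2bpf call, so the counter must survive the
 * extra call frame (assumed shape, illustrative):
 *
 *	int count;
 *
 *	static __noinline
 *	int subprog_tail(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return 1;
 *	}
 *
 *	SEC("tc")
 *	int classifier_0(struct __sk_buff *skb)
 *	{
 *		count++;
 *		return subprog_tail(skb);
 *	}
 */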
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within BPF subprograms that contain the tail calls.
 */
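/* Illustrative sketch (assumed, not verbatim): a subprogram keeps a large
 * buffer live on its stack across the tail call, so the JIT's frame setup
 * and teardown around tail calls is exercised with non-trivial frame sizes:
 *
 *	static __noinline
 *	int subprog_tail(struct __sk_buff *skb)
 *	{
 *		volatile char arr[128] = {};
 *
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return skb->len * 2;
 *	}
 */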
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf4.skel.h"

/* test_tailcall_bpf2bpf_4 checks that the tail call counter is correctly
 * preserved across tail calls combined with bpf2bpf calls. To make sure the
 * counter behaves correctly, the BPF program goes through the following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tail calls and start counting from subprog2,
 * where the loop begins. At the end of the test we make sure that the global
 * counter equals 31: the tail call counter includes the first two tail calls
 * of the 33 allowed in total, whereas the global counter is incremented only
 * in the loop shown above.
 *
 * The noise parameter is used to insert bpf_map_update calls into the logic
 * to force the verifier to patch instructions. This allows us to ensure that
 * the jump logic remains correct under instruction movement.
 */
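/* The "noise" knob, roughly (hypothetical sketch; the map name below is
 * illustrative, not taken from the BPF source): each pass through the loop
 * additionally updates a scratch map, whose helper call the verifier
 * patches, shifting the instruction offsets that the tail call poke
 * descriptors must keep tracking:
 *
 *	if (noise) {
 *		__u32 zero = 0;
 *
 *		bpf_map_update_elem(&scratch_map, &zero, &zero, BPF_ANY);
 *	}
 */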
static void test_tailcall_bpf2bpf_4(bool noise)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct tailcall_bpf2bpf4__bss val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	val.noise = noise;
	val.count = 0;
	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val.count, 31, "tailcall count");

out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf6.skel.h"

/* Tail call counting works even when there is data on the stack that is
 * not aligned to 8 bytes.
 */
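/* The assumed trigger (illustrative sketch): an object whose size is not a
 * multiple of 8 is live on the stack when the tail call happens, so the JIT
 * cannot rely on an 8-byte-aligned stack layout when it spills and reloads
 * the tail call counter:
 *
 *	static __noinline
 *	int subprog_tail(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return 1;
 *	}
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		volatile char arr[1] = {};
 *
 *		return subprog_tail(skb) + arr[0];
 *	}
 */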
static void test_tailcall_bpf2bpf_6(void)
{
	struct tailcall_bpf2bpf6 *obj;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	obj = tailcall_bpf2bpf6__open_and_load();
	if (!ASSERT_OK_PTR(obj, "open and load"))
		return;

	main_fd = bpf_program__fd(obj->progs.entry);
	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
		goto out;

	map_fd = bpf_map__fd(obj->maps.jmp_table);
	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
		goto out;

	prog_fd = bpf_program__fd(obj->progs.classifier_0);
	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "jmp_table map update"))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "entry prog test run");
	ASSERT_EQ(topts.retval, 0, "tailcall retval");

	data_fd = bpf_map__fd(obj->maps.bss);
	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "bss map lookup");
	ASSERT_EQ(val, 1, "done flag is set");

out:
	tailcall_bpf2bpf6__destroy(obj);
}

/* test_tailcall_bpf2bpf_fentry checks that the count value of the tail call
 * limit enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call, and the bpf2bpf call is traced by fentry.
 */
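/* Assumed shape of the tracing side (illustrative; the SEC target is
 * overridden at runtime via bpf_program__set_attach_target(), and the tests
 * only rely on the program bumping a .bss counter every time the traced
 * subprogram is entered):
 *
 *	int count;
 *
 *	SEC("fentry/subprog_tail")
 *	int BPF_PROG(fentry, struct sk_buff *skb)
 *	{
 *		count++;
 *		return 0;
 *	}
 */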
static void test_tailcall_bpf2bpf_fentry(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false);
}

/* test_tailcall_bpf2bpf_fexit checks that the count value of the tail call
 * limit enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call, and the bpf2bpf call is traced by fexit.
 */
static void test_tailcall_bpf2bpf_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true);
}

/* test_tailcall_bpf2bpf_fentry_fexit checks that the count value of the tail
 * call limit enforcement matches expectations when the tail call is preceded
 * by a bpf2bpf call, and the bpf2bpf call is traced by both fentry and fexit.
 */
static void test_tailcall_bpf2bpf_fentry_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true);
}

/* test_tailcall_bpf2bpf_fentry_entry checks that the count value of the tail
 * call limit enforcement matches expectations when the tail call is preceded
 * by a bpf2bpf call, and the bpf2bpf caller is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry_entry(void)
{
	struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL;
	int err, map_fd, prog_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_link *fentry_link = NULL;
	struct bpf_program *prog;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o",
				 BPF_PROG_TYPE_SCHED_CLS,
				 &tgt_obj, &prog_fd);
	if (!ASSERT_OK(err, "load tgt_obj"))
		return;

	prog_array = bpf_object__find_map_by_name(tgt_obj, "jmp_table");
	if (!ASSERT_OK_PTR(prog_array, "find jmp_table map"))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (!ASSERT_FALSE(map_fd < 0, "find jmp_table map fd"))
		goto out;

	prog = bpf_object__find_program_by_name(tgt_obj, "classifier_0");
	if (!ASSERT_OK_PTR(prog, "find classifier_0 prog"))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_FALSE(prog_fd < 0, "find classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
					   NULL);
	if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
		goto out;

	prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
	if (!ASSERT_OK_PTR(prog, "find fentry prog"))
		goto out;

	err = bpf_program__set_attach_target(prog, prog_fd, "classifier_0");
	if (!ASSERT_OK(err, "set_attach_target classifier_0"))
		goto out;

	err = bpf_object__load(fentry_obj);
	if (!ASSERT_OK(err, "load fentry_obj"))
		goto out;

	fentry_link = bpf_program__attach_trace(prog);
	if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
		goto out;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(tgt_obj, "tailcall.bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0, "find tailcall.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 34, "tailcall count");

	data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall_bpf2bpf_fentry.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0,
			  "find tailcall_bpf2bpf_fentry.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "fentry count");
	ASSERT_EQ(val, 1, "fentry count");

out:
	bpf_link__destroy(fentry_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(tgt_obj);
}

#define JMP_TABLE "/sys/fs/bpf/jmp_table"

static int poke_thread_exit;

static void *poke_update(void *arg)
{
	__u32 zero = 0, prog1_fd, prog2_fd, map_fd;
	struct tailcall_poke *call = arg;

	map_fd = bpf_map__fd(call->maps.jmp_table);
	prog1_fd = bpf_program__fd(call->progs.call1);
	prog2_fd = bpf_program__fd(call->progs.call2);

	while (!poke_thread_exit) {
		bpf_map_update_elem(map_fd, &zero, &prog1_fd, BPF_ANY);
		bpf_map_update_elem(map_fd, &zero, &prog2_fd, BPF_ANY);
	}

	return NULL;
}

/*
 * We are trying to hit a prog array update during another program load
 * that shares the same prog array map.
 *
 * For that we share the jmp_table map between two skeleton instances by
 * pinning the jmp_table to the same path. Then the first skeleton instance
 * periodically updates the jmp_table in the 'poke update' thread while we
 * load the second skeleton instance in the main thread.
 */
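/* The interleaving we try to provoke, conceptually:
 *
 *	main thread                      poke thread
 *	-----------                      -----------
 *	tailcall_poke__load(test)
 *	  jit image being written        bpf_map_update_elem(jmp_table)
 *	                                   text poke aimed at the image
 *
 * A kernel bug in this window would typically surface as a splat rather
 * than as a failed assertion, which is why the load is simply repeated in
 * a loop below.
 */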
static void test_tailcall_poke(void)
{
	struct tailcall_poke *call, *test;
	int err, cnt = 10;
	pthread_t thread;

	unlink(JMP_TABLE);

	call = tailcall_poke__open_and_load();
	if (!ASSERT_OK_PTR(call, "tailcall_poke__open_and_load"))
		return;

	err = bpf_map__pin(call->maps.jmp_table, JMP_TABLE);
	if (!ASSERT_OK(err, "bpf_map__pin"))
		goto out;

	err = pthread_create(&thread, NULL, poke_update, call);
	if (!ASSERT_OK(err, "new toggler"))
		goto out;

	while (cnt--) {
		test = tailcall_poke__open();
		if (!ASSERT_OK_PTR(test, "tailcall_poke__open"))
			break;

		err = bpf_map__set_pin_path(test->maps.jmp_table, JMP_TABLE);
		if (!ASSERT_OK(err, "bpf_map__set_pin_path")) {
			tailcall_poke__destroy(test);
			break;
		}

		bpf_program__set_autoload(test->progs.test, true);
		bpf_program__set_autoload(test->progs.call1, false);
		bpf_program__set_autoload(test->progs.call2, false);

		err = tailcall_poke__load(test);
		tailcall_poke__destroy(test);
		if (!ASSERT_OK(err, "tailcall_poke__load"))
			break;
	}

	poke_thread_exit = 1;
	ASSERT_OK(pthread_join(thread, NULL), "pthread_join");

out:
	bpf_map__unpin(call->maps.jmp_table, JMP_TABLE);
	tailcall_poke__destroy(call);
}

void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
	if (test__start_subtest("tailcall_bpf2bpf_fentry"))
		test_tailcall_bpf2bpf_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_fexit"))
		test_tailcall_bpf2bpf_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit"))
		test_tailcall_bpf2bpf_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
		test_tailcall_bpf2bpf_fentry_entry();
	if (test__start_subtest("tailcall_poke"))
		test_tailcall_poke();
}