// SPDX-License-Identifier: GPL-2.0
#include <unistd.h>
#include <test_progs.h>
#include <network_helpers.h>
#include "tailcall_poke.skel.h"
#include "tailcall_bpf2bpf_hierarchy2.skel.h"
#include "tailcall_bpf2bpf_hierarchy3.skel.h"

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
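/* For orientation, a minimal sketch (an assumption, not the verbatim
 * tailcall1.bpf.c source) of the BPF side this test exercises: the entry
 * program tries consecutive jmp_table slots and falls through to return 3
 * when a slot is empty, while classifier_i simply returns i:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 3);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} jmp_table SEC(".maps");
 *
 *	SEC("tc")
 *	int classifier_0(struct __sk_buff *skb)
 *	{
 *		return 0;
 *	}
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		bpf_tail_call_static(skb, &jmp_table, 1);
 *		bpf_tail_call_static(skb, &jmp_table, 2);
 *		return 3;
 *	}
 */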
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, j, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}

out:
	bpf_object__close(obj);
}

/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
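/* A possible shape of the BPF side (a sketch, not the verbatim
 * tailcall2.bpf.c source): the classifiers chain into one another through
 * consecutive slots, so deleting a slot cuts the chain short, which matches
 * the return values asserted below:
 *
 *	SEC("tc")
 *	int classifier_0(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call_static(skb, &jmp_table, 1);
 *		return 0;
 *	}
 *
 *	SEC("tc")
 *	int classifier_1(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call_static(skb, &jmp_table, 2);
 *		return 1;
 *	}
 *
 *	SEC("tc")
 *	int classifier_2(struct __sk_buff *skb)
 *	{
 *		return 2;
 *	}
 */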
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 2, "tailcall retval");

	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
	bpf_object__close(obj);
}

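/* test_tailcall_count is the common helper behind the tailcall_3/tailcall_6
 * style tests: a single classifier repeatedly tail calls itself and bumps a
 * global counter, so the value read back from tailcall.bss reflects how many
 * iterations the kernel's tail call limit (MAX_TAIL_CALL_CNT, 33 on current
 * kernels) allowed. Optionally the subprog performing the tail call is traced
 * by fentry and/or fexit programs, which must observe the same count. The
 * self-calling BPF side might look like this sketch (an assumption, not the
 * verbatim tailcall3.bpf.c source):
 *
 *	int count;
 *
 *	SEC("tc")
 *	int classifier_0(struct __sk_buff *skb)
 *	{
 *		count++;
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return 1;
 *	}
 */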
static void test_tailcall_count(const char *which, bool test_fentry,
				bool test_fexit)
{
	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	if (test_fentry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	if (test_fexit) {
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	if (test_fentry) {
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 33, "fentry count");
	}

	if (test_fexit) {
		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fexit.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fexit.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 33, "fexit count");
	}

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(fexit_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(fexit_obj);
	bpf_object__close(obj);
}

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses a direct jump.
 */
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.bpf.o", false, false);
}

/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses an indirect jump.
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.bpf.o", false, false);
}

/* test_tailcall_4 checks that the kernel properly selects an indirect jump
 * for the case where the key is not known. The key is passed via global
 * data to select different targets whose return values we can compare.
 */
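/* A sketch of what forces the indirect jump (an assumption, not the verbatim
 * tailcall4.bpf.c source): the index comes from writable global data, so the
 * verifier cannot treat it as a known constant:
 *
 *	int selector;
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		int idx = selector;
 *
 *		bpf_tail_call(skb, &jmp_table, idx);
 *		return 3;
 *	}
 */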
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_5 probes, similarly to test_tailcall_4, that the kernel
 * generates an indirect jump when the keys are constant but differ across
 * branches.
 */
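/* A sketch of the const-but-divergent-keys case (an assumption, not the
 * verbatim tailcall5.bpf.c source): each branch uses a constant index, but
 * the branches disagree, so a single direct jump cannot be patched in:
 *
 *	int selector;
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		switch (selector) {
 *		case 1111:
 *			bpf_tail_call_static(skb, &jmp_table, 0);
 *			break;
 *		case 1234:
 *			bpf_tail_call_static(skb, &jmp_table, 1);
 *			break;
 *		case 5678:
 *			bpf_tail_call_static(skb, &jmp_table, 2);
 *			break;
 *		}
 *		return 3;
 *	}
 */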
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 makes sure that tail calls work correctly in
 * combination with BPF subprograms.
 */
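/* The pattern under test, sketched (an assumption, not the verbatim
 * tailcall_bpf2bpf1.bpf.c source): the tail call is issued from inside a
 * bpf2bpf subprogram rather than from the entry program itself:
 *
 *	static __noinline int subprog_tail(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return 0;
 *	}
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		return subprog_tail(skb);
 *	}
 */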
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* make sure that subprog can access ctx and entry prog that
	 * called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call.
 */
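/* Sketch of the counting loop (an assumption, not the verbatim
 * tailcall_bpf2bpf2.bpf.c source): classifier_0 bumps a global counter and
 * re-enters itself through a subprogram, so the run terminates only once the
 * kernel's tail call limit kicks in and the final return value is 1:
 *
 *	int count;
 *
 *	static __noinline int subprog_tail(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return 1;
 *	}
 *
 *	SEC("tc")
 *	int classifier_0(struct __sk_buff *skb)
 *	{
 *		count++;
 *		return subprog_tail(skb);
 *	}
 */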
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within BPF subprograms that contain tail calls.
 */
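/* Sketch of the stack-heavy subprogram (an assumption, not the verbatim
 * tailcall_bpf2bpf3.bpf.c source): a sizable stack object is live across the
 * tail call site, which the JIT's frame handling must accommodate:
 *
 *	static __noinline int subprog_tail(struct __sk_buff *skb)
 *	{
 *		volatile char arr[128] = {};
 *
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return skb->len;
 *	}
 */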
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf4.skel.h"

/* test_tailcall_bpf2bpf_4 checks that the tail call counter is correctly
 * preserved across tail calls combined with bpf2bpf calls. To make sure the
 * counter behaves correctly, the BPF program goes through the following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tail calls and start counting from subprog2,
 * where the loop begins. At the end of the test we verify that the global
 * counter equals 31, because the tail call counter includes the first two
 * tail calls whereas the global counter is incremented only in the loop
 * shown in the flow above.
 *
 * The noise parameter is used to insert bpf_map_update calls into the logic
 * to force the verifier to patch instructions. This allows us to ensure the
 * jump logic remains correct under instruction movement.
 */
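/* The noise path might look like this sketch (an assumption, not the
 * verbatim tailcall_bpf2bpf4.bpf.c source; nop_table is a hypothetical
 * scratch ARRAY map): the map helper call is expanded by the verifier when
 * patched, shifting the surrounding instructions:
 *
 *	int noise;
 *
 *	static __noinline int subprog_noise(struct __sk_buff *skb)
 *	{
 *		__u32 key = 0, val = 0;
 *
 *		if (noise)
 *			bpf_map_update_elem(&nop_table, &key, &val, BPF_ANY);
 *		return 0;
 *	}
 */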
static void test_tailcall_bpf2bpf_4(bool noise)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct tailcall_bpf2bpf4__bss val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	val.noise = noise;
	val.count = 0;
	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val.count, 31, "tailcall count");

out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf6.skel.h"

/* Tail call counting works even when there is data on the stack that is
 * not aligned to 8 bytes.
 */
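/* Sketch of the unaligned-stack setup (an assumption, not the verbatim
 * tailcall_bpf2bpf6.bpf.c source): a stack object whose size is not a
 * multiple of 8 is live across the tail call:
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		volatile char arr[3] = {};
 *
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return arr[0];
 *	}
 */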
static void test_tailcall_bpf2bpf_6(void)
{
	struct tailcall_bpf2bpf6 *obj;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	obj = tailcall_bpf2bpf6__open_and_load();
	if (!ASSERT_OK_PTR(obj, "open and load"))
		return;

	main_fd = bpf_program__fd(obj->progs.entry);
	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
		goto out;

	map_fd = bpf_map__fd(obj->maps.jmp_table);
	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
		goto out;

	prog_fd = bpf_program__fd(obj->progs.classifier_0);
	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "jmp_table map update"))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "entry prog test run");
	ASSERT_EQ(topts.retval, 0, "tailcall retval");

	data_fd = bpf_map__fd(obj->maps.bss);
	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "bss map lookup");
	ASSERT_EQ(val, 1, "done flag is set");

out:
	tailcall_bpf2bpf6__destroy(obj);
}

/* test_tailcall_bpf2bpf_fentry checks that the count value of the tail call
 * limit enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call, and the bpf2bpf call is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false);
}

/* test_tailcall_bpf2bpf_fexit checks that the count value of the tail call
 * limit enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call, and the bpf2bpf call is traced by fexit.
 */
static void test_tailcall_bpf2bpf_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true);
}

/* test_tailcall_bpf2bpf_fentry_fexit checks that the count value of the tail
 * call limit enforcement matches expectations when the tail call is preceded
 * by a bpf2bpf call, and the bpf2bpf call is traced by both fentry and fexit.
 */
static void test_tailcall_bpf2bpf_fentry_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true);
}

/* test_tailcall_bpf2bpf_fentry_entry checks that the count value of the tail
 * call limit enforcement matches expectations when the tail call is preceded
 * by a bpf2bpf call, and the bpf2bpf caller is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry_entry(void)
{
	struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL;
	int err, map_fd, prog_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_link *fentry_link = NULL;
	struct bpf_program *prog;
	char buff[128] = {};

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o",
				 BPF_PROG_TYPE_SCHED_CLS,
				 &tgt_obj, &prog_fd);
	if (!ASSERT_OK(err, "load tgt_obj"))
		return;

	prog_array = bpf_object__find_map_by_name(tgt_obj, "jmp_table");
	if (!ASSERT_OK_PTR(prog_array, "find jmp_table map"))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (!ASSERT_FALSE(map_fd < 0, "find jmp_table map fd"))
		goto out;

	prog = bpf_object__find_program_by_name(tgt_obj, "classifier_0");
	if (!ASSERT_OK_PTR(prog, "find classifier_0 prog"))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_FALSE(prog_fd < 0, "find classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
					   NULL);
	if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
		goto out;

	prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
	if (!ASSERT_OK_PTR(prog, "find fentry prog"))
		goto out;

	err = bpf_program__set_attach_target(prog, prog_fd, "classifier_0");
	if (!ASSERT_OK(err, "set_attach_target classifier_0"))
		goto out;

	err = bpf_object__load(fentry_obj);
	if (!ASSERT_OK(err, "load fentry_obj"))
		goto out;

	fentry_link = bpf_program__attach_trace(prog);
	if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
		goto out;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(tgt_obj, "tailcall.bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0, "find tailcall.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 34, "tailcall count");

	data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall_bpf2bpf_fentry.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0,
			  "find tailcall_bpf2bpf_fentry.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "fentry count");
	ASSERT_EQ(val, 1, "fentry count");

out:
	bpf_link__destroy(fentry_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(tgt_obj);
}

#define JMP_TABLE "/sys/fs/bpf/jmp_table"

/* Set by the main thread and polled by the poke_update() thread; volatile so
 * the compiler cannot hoist the load out of the polling loop.
 */
static volatile int poke_thread_exit;

static void *poke_update(void *arg)
{
	__u32 zero = 0, prog1_fd, prog2_fd, map_fd;
	struct tailcall_poke *call = arg;

	map_fd = bpf_map__fd(call->maps.jmp_table);
	prog1_fd = bpf_program__fd(call->progs.call1);
	prog2_fd = bpf_program__fd(call->progs.call2);

	while (!poke_thread_exit) {
		bpf_map_update_elem(map_fd, &zero, &prog1_fd, BPF_ANY);
		bpf_map_update_elem(map_fd, &zero, &prog2_fd, BPF_ANY);
	}

	return NULL;
}

/*
 * We are trying to hit a prog array update during another program load
 * that shares the same prog array map.
 *
 * For that we share the jmp_table map between two skeleton instances by
 * pinning the jmp_table to the same path. Then the first skeleton instance
 * periodically updates jmp_table in the poke_update() thread while we load
 * the second skeleton instance in the main thread.
 */
static void test_tailcall_poke(void)
{
	struct tailcall_poke *call, *test;
	int err, cnt = 10;
	pthread_t thread;

	unlink(JMP_TABLE);

	call = tailcall_poke__open_and_load();
	if (!ASSERT_OK_PTR(call, "tailcall_poke__open"))
		return;

	err = bpf_map__pin(call->maps.jmp_table, JMP_TABLE);
	if (!ASSERT_OK(err, "bpf_map__pin"))
		goto out;

	err = pthread_create(&thread, NULL, poke_update, call);
	if (!ASSERT_OK(err, "new toggler"))
		goto out;

	while (cnt--) {
		test = tailcall_poke__open();
		if (!ASSERT_OK_PTR(test, "tailcall_poke__open"))
			break;

		err = bpf_map__set_pin_path(test->maps.jmp_table, JMP_TABLE);
		if (!ASSERT_OK(err, "bpf_map__set_pin_path")) {
			tailcall_poke__destroy(test);
			break;
		}

		bpf_program__set_autoload(test->progs.test, true);
		bpf_program__set_autoload(test->progs.call1, false);
		bpf_program__set_autoload(test->progs.call2, false);

		err = tailcall_poke__load(test);
		tailcall_poke__destroy(test);
		if (!ASSERT_OK(err, "tailcall_poke__load"))
			break;
	}

	poke_thread_exit = 1;
	ASSERT_OK(pthread_join(thread, NULL), "pthread_join");

out:
	bpf_map__unpin(call->maps.jmp_table, JMP_TABLE);
	tailcall_poke__destroy(call);
}

static void test_tailcall_hierarchy_count(const char *which, bool test_fentry,
					  bool test_fexit,
					  bool test_fentry_entry)
{
	int err, map_fd, prog_fd, main_data_fd, fentry_data_fd, fexit_data_fd, i, val;
	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
	struct bpf_program *prog, *fentry_prog;
	struct bpf_map *prog_array, *data_map;
	int fentry_prog_fd;
	char buff[128] = {};

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (!ASSERT_OK(err, "load obj"))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (!ASSERT_OK_PTR(prog, "find entry prog"))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
		goto out;

	if (test_fentry_entry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_hierarchy_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		fentry_prog = bpf_object__find_program_by_name(fentry_obj,
							       "fentry");
		if (!ASSERT_OK_PTR(fentry_prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(fentry_prog, prog_fd,
						     "entry");
		if (!ASSERT_OK(err, "set_attach_target entry"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(fentry_prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;

		fentry_prog_fd = bpf_program__fd(fentry_prog);
		if (!ASSERT_GE(fentry_prog_fd, 0, "fentry_prog_fd"))
			goto out;

		prog_array = bpf_object__find_map_by_name(fentry_obj, "jmp_table");
		if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
			goto out;

		map_fd = bpf_map__fd(prog_array);
		if (!ASSERT_GE(map_fd, 0, "map_fd"))
			goto out;

		i = 0;
		err = bpf_map_update_elem(map_fd, &i, &fentry_prog_fd, BPF_ANY);
		if (!ASSERT_OK(err, "update jmp_table"))
			goto out;

		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find data_map"))
			goto out;

	} else {
		prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
		if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
			goto out;

		map_fd = bpf_map__fd(prog_array);
		if (!ASSERT_GE(map_fd, 0, "map_fd"))
			goto out;

		i = 0;
		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (!ASSERT_OK(err, "update jmp_table"))
			goto out;

		data_map = bpf_object__find_map_by_name(obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find data_map"))
			goto out;
	}

	if (test_fentry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	if (test_fexit) {
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	main_data_fd = bpf_map__fd(data_map);
	if (!ASSERT_GE(main_data_fd, 0, "main_data_fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(main_data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 34, "tailcall count");

	if (test_fentry) {
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		fentry_data_fd = bpf_map__fd(data_map);
		if (!ASSERT_GE(fentry_data_fd, 0,
				  "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 68, "fentry count");
	}

	if (test_fexit) {
		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fexit.bss map"))
			goto out;

		fexit_data_fd = bpf_map__fd(data_map);
		if (!ASSERT_GE(fexit_data_fd, 0,
				  "find tailcall_bpf2bpf_fexit.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(fexit_data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 68, "fexit count");
	}

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
		goto out;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(main_data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 35, "tailcall count");

	if (test_fentry) {
		i = 0;
		err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 70, "fentry count");
	}

	if (test_fexit) {
		i = 0;
		err = bpf_map_lookup_elem(fexit_data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 70, "fexit count");
	}

out:
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(fexit_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(fexit_obj);
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_hierarchy_1 checks that the count value of the tail
 * call limit enforcement matches expectations when tail calls are preceded
 * by two bpf2bpf calls.
 *
 *         subprog --tailcall-> entry
 * entry <
 *         subprog --tailcall-> entry
 */
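/* Sketched BPF side (an assumption, not the verbatim
 * tailcall_bpf2bpf_hierarchy1.bpf.c source): both subprog call sites tail
 * call back into entry, so a single shared counter must bound the whole
 * call tree, which is consistent with the count of 34 asserted by the
 * helper (one initial entry plus 33 permitted tail calls):
 *
 *	int count;
 *
 *	static __noinline int subprog_tail(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return 0;
 *	}
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		count++;
 *		subprog_tail(skb);
 *		subprog_tail(skb);
 *		return 1;
 *	}
 */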
static void test_tailcall_bpf2bpf_hierarchy_1(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      false, false, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fentry checks that the count value of the
 * tail call limit enforcement matches expectations when tail calls are
 * preceded by two bpf2bpf calls, and the two subprogs are traced by fentry.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      true, false, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fexit checks that the count value of the
 * tail call limit enforcement matches expectations when tail calls are
 * preceded by two bpf2bpf calls, and the two subprogs are traced by fexit.
 */
static void test_tailcall_bpf2bpf_hierarchy_fexit(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      false, true, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fentry_fexit checks that the count value of
 * the tail call limit enforcement matches expectations when tail calls are
 * preceded by two bpf2bpf calls, and the two subprogs are traced by both
 * fentry and fexit.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry_fexit(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      true, true, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fentry_entry checks that the count value of
 * the tail call limit enforcement matches expectations when tail calls are
 * preceded by two bpf2bpf calls in fentry.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry_entry(void)
{
	test_tailcall_hierarchy_count("tc_dummy.bpf.o", false, false, true);
}

/* test_tailcall_bpf2bpf_hierarchy_2 checks that the count value of the tail
 * call limit enforcement matches expectations:
 *
 *         subprog_tail0 --tailcall-> classifier_0 -> subprog_tail0
 * entry <
 *         subprog_tail1 --tailcall-> classifier_1 -> subprog_tail1
 */
static void test_tailcall_bpf2bpf_hierarchy_2(void)
{
	RUN_TESTS(tailcall_bpf2bpf_hierarchy2);
}

/* test_tailcall_bpf2bpf_hierarchy_3 checks that the count value of the tail
 * call limit enforcement matches expectations:
 *
 *                                   subprog with jmp_table0 to classifier_0
 * entry --tailcall-> classifier_0 <
 *                                   subprog with jmp_table1 to classifier_0
 */
static void test_tailcall_bpf2bpf_hierarchy_3(void)
{
	RUN_TESTS(tailcall_bpf2bpf_hierarchy3);
}

void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
	if (test__start_subtest("tailcall_bpf2bpf_fentry"))
		test_tailcall_bpf2bpf_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_fexit"))
		test_tailcall_bpf2bpf_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit"))
		test_tailcall_bpf2bpf_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
		test_tailcall_bpf2bpf_fentry_entry();
	if (test__start_subtest("tailcall_poke"))
		test_tailcall_poke();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_1"))
		test_tailcall_bpf2bpf_hierarchy_1();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry"))
		test_tailcall_bpf2bpf_hierarchy_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fexit"))
		test_tailcall_bpf2bpf_hierarchy_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_fexit"))
		test_tailcall_bpf2bpf_hierarchy_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_entry"))
		test_tailcall_bpf2bpf_hierarchy_fentry_entry();
	test_tailcall_bpf2bpf_hierarchy_2();
	test_tailcall_bpf2bpf_hierarchy_3();
}
1547