// SPDX-License-Identifier: GPL-2.0
#include <unistd.h>
#include <test_progs.h>
#include <network_helpers.h>
#include "tailcall_poke.skel.h"
#include "tailcall_bpf2bpf_hierarchy2.skel.h"
#include "tailcall_bpf2bpf_hierarchy3.skel.h"
#include "tailcall_freplace.skel.h"
#include "tc_bpf2bpf.skel.h"

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

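	/* nop -> jmp: populate every jmp_table slot */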
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

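	/* jmp -> nop: run each slot once, then clear it */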
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

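	/* nop -> jmp: repopulate the now-empty slots */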
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

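	/* jmp -> jmp: overwrite the live slots with programs in reverse order */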
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, j, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

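	/* nop -> nop: deleting already-empty slots fails with ENOENT and must
	 * leave the call sites untouched
	 */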
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}

out:
	bpf_object__close(obj);
}

/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 2, "tailcall retval");

	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
	bpf_object__close(obj);
}

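/* test_tailcall_count is the shared helper for the tail call limit tests
 * below. It wires classifier_0 into slot 0 of the jmp_table, optionally
 * attaches fentry and/or fexit programs to subprog_tail, runs the entry
 * program and verifies that every counter involved reached the expected
 * value of 33.
 */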
static void test_tailcall_count(const char *which, bool test_fentry,
				bool test_fexit)
{
	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	if (test_fentry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	if (test_fexit) {
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	if (test_fentry) {
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 33, "fentry count");
	}

	if (test_fexit) {
		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fexit.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fexit.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 33, "fexit count");
	}

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(fexit_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(fexit_obj);
	bpf_object__close(obj);
}

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses a direct jump.
 */
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.bpf.o", false, false);
}

/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses an indirect jump.
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.bpf.o", false, false);
}

/* test_tailcall_4 checks that the kernel properly selects an indirect jump
 * for the case where the key is not known at verification time. The key is
 * passed via global data to select different targets whose return values
 * we can compare.
 */
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

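	/* select each target via the global key and check its return value */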
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

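	/* deleted slots must fall through to the default return value of 3 */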
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_5 probes, similarly to test_tailcall_4, that the kernel
 * generates an indirect jump when the keys are constant but differ
 * between branches.
 */
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 makes sure that tail calls work correctly in
 * combination with BPF subprograms.
 */
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* make sure that the subprog can access ctx and that the entry prog
	 * that called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call.
 */
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within BPF subprograms that contain tail calls.
 */
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf4.skel.h"

/* test_tailcall_bpf2bpf_4 checks that the tailcall counter is correctly
 * preserved across tailcalls combined with bpf2bpf calls. To make sure that
 * the tailcall counter behaves correctly, the bpf program goes through the
 * following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tailcalls and start counting from subprog2,
 * where the loop begins. At the end of the test, make sure that the global
 * counter equals 31, because the tailcall counter includes the first two
 * tailcalls whereas the global counter is incremented only in the loop
 * shown in the flow above.
 *
 * The noise parameter is used to insert bpf_map_update calls into the logic
 * to force the verifier to patch instructions. This allows us to ensure the
 * jump logic remains correct with instruction movement.
 */
static void test_tailcall_bpf2bpf_4(bool noise)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct tailcall_bpf2bpf4__bss val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

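	/* seed the global state: set the noise flag and reset the counter */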
	i = 0;
	val.noise = noise;
	val.count = 0;
	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val.count, 31, "tailcall count");

out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf6.skel.h"

/* Tail call counting works even when there is data on the stack that is
 * not aligned to 8 bytes.
 */
static void test_tailcall_bpf2bpf_6(void)
{
	struct tailcall_bpf2bpf6 *obj;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	obj = tailcall_bpf2bpf6__open_and_load();
	if (!ASSERT_OK_PTR(obj, "open and load"))
		return;

	main_fd = bpf_program__fd(obj->progs.entry);
	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
		goto out;

	map_fd = bpf_map__fd(obj->maps.jmp_table);
	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
		goto out;

	prog_fd = bpf_program__fd(obj->progs.classifier_0);
	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "jmp_table map update"))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "entry prog test run");
	ASSERT_EQ(topts.retval, 0, "tailcall retval");

	data_fd = bpf_map__fd(obj->maps.bss);
	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "bss map lookup");
	ASSERT_EQ(val, 1, "done flag is set");

out:
	tailcall_bpf2bpf6__destroy(obj);
}

/* test_tailcall_bpf2bpf_fentry checks that the count value of the tail call
 * limit enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call, and the bpf2bpf call is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false);
}

/* test_tailcall_bpf2bpf_fexit checks that the count value of the tail call
 * limit enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call, and the bpf2bpf call is traced by fexit.
 */
static void test_tailcall_bpf2bpf_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true);
}

/* test_tailcall_bpf2bpf_fentry_fexit checks that the count value of the tail
 * call limit enforcement matches expectations when the tail call is preceded
 * by a bpf2bpf call, and the bpf2bpf call is traced by both fentry and fexit.
 */
static void test_tailcall_bpf2bpf_fentry_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true);
}

/* test_tailcall_bpf2bpf_fentry_entry checks that the count value of the tail
 * call limit enforcement matches expectations when the tail call is preceded
 * by a bpf2bpf call, and the bpf2bpf caller is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry_entry(void)
{
	struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL;
	int err, map_fd, prog_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_link *fentry_link = NULL;
	struct bpf_program *prog;
	char buff[128] = {};

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o",
				 BPF_PROG_TYPE_SCHED_CLS,
				 &tgt_obj, &prog_fd);
	if (!ASSERT_OK(err, "load tgt_obj"))
		return;

	prog_array = bpf_object__find_map_by_name(tgt_obj, "jmp_table");
	if (!ASSERT_OK_PTR(prog_array, "find jmp_table map"))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (!ASSERT_FALSE(map_fd < 0, "find jmp_table map fd"))
		goto out;

	prog = bpf_object__find_program_by_name(tgt_obj, "classifier_0");
	if (!ASSERT_OK_PTR(prog, "find classifier_0 prog"))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_FALSE(prog_fd < 0, "find classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
					   NULL);
	if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
		goto out;

	prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
	if (!ASSERT_OK_PTR(prog, "find fentry prog"))
		goto out;

	err = bpf_program__set_attach_target(prog, prog_fd, "classifier_0");
	if (!ASSERT_OK(err, "set_attach_target classifier_0"))
		goto out;

	err = bpf_object__load(fentry_obj);
	if (!ASSERT_OK(err, "load fentry_obj"))
		goto out;

	fentry_link = bpf_program__attach_trace(prog);
	if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
		goto out;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(tgt_obj, "tailcall.bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0, "find tailcall.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 34, "tailcall count");

	data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall_bpf2bpf_fentry.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0,
			  "find tailcall_bpf2bpf_fentry.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "fentry count");
	ASSERT_EQ(val, 1, "fentry count");

out:
	bpf_link__destroy(fentry_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(tgt_obj);
}

#define JMP_TABLE "/sys/fs/bpf/jmp_table"

static int poke_thread_exit;

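/* Toggle jmp_table slot 0 between call1 and call2 in a loop so that prog
 * array updates race against the program loads in the main thread.
 */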
static void *poke_update(void *arg)
{
	__u32 zero = 0, prog1_fd, prog2_fd, map_fd;
	struct tailcall_poke *call = arg;

	map_fd = bpf_map__fd(call->maps.jmp_table);
	prog1_fd = bpf_program__fd(call->progs.call1);
	prog2_fd = bpf_program__fd(call->progs.call2);

	while (!poke_thread_exit) {
		bpf_map_update_elem(map_fd, &zero, &prog1_fd, BPF_ANY);
		bpf_map_update_elem(map_fd, &zero, &prog2_fd, BPF_ANY);
	}

	return NULL;
}

/*
 * We are trying to hit a prog array update during another program load
 * that shares the same prog array map.
 *
 * For that we share the jmp_table map between two skeleton instances by
 * pinning the jmp_table to the same path. The first skeleton instance then
 * periodically updates the jmp_table in the 'poke update' thread while we
 * load the second skeleton instance in the main thread.
 */
static void test_tailcall_poke(void)
{
	struct tailcall_poke *call, *test;
	int err, cnt = 10;
	pthread_t thread;

	unlink(JMP_TABLE);

	call = tailcall_poke__open_and_load();
	if (!ASSERT_OK_PTR(call, "tailcall_poke__open"))
		return;

	err = bpf_map__pin(call->maps.jmp_table, JMP_TABLE);
	if (!ASSERT_OK(err, "bpf_map__pin"))
		goto out;

	err = pthread_create(&thread, NULL, poke_update, call);
	if (!ASSERT_OK(err, "new toggler"))
		goto out;

	while (cnt--) {
		test = tailcall_poke__open();
		if (!ASSERT_OK_PTR(test, "tailcall_poke__open"))
			break;

		err = bpf_map__set_pin_path(test->maps.jmp_table, JMP_TABLE);
		if (!ASSERT_OK(err, "bpf_map__pin")) {
			tailcall_poke__destroy(test);
			break;
		}

		bpf_program__set_autoload(test->progs.test, true);
		bpf_program__set_autoload(test->progs.call1, false);
		bpf_program__set_autoload(test->progs.call2, false);

		err = tailcall_poke__load(test);
		tailcall_poke__destroy(test);
		if (!ASSERT_OK(err, "tailcall_poke__load"))
			break;
	}

	poke_thread_exit = 1;
	ASSERT_OK(pthread_join(thread, NULL), "pthread_join");

out:
	bpf_map__unpin(call->maps.jmp_table, JMP_TABLE);
	tailcall_poke__destroy(call);
}

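/* test_tailcall_hierarchy_count is the shared helper for the hierarchy tests
 * below. It makes the entry program (or, with test_fentry_entry, an fentry
 * program attached to it) tail call itself through the jmp_table, optionally
 * traces subprog_tail with fentry/fexit, and then checks all the involved
 * counters against the tail call limit.
 */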
static void test_tailcall_hierarchy_count(const char *which, bool test_fentry,
					  bool test_fexit,
					  bool test_fentry_entry)
{
	int err, map_fd, prog_fd, main_data_fd, fentry_data_fd, fexit_data_fd, i, val;
	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
	struct bpf_program *prog, *fentry_prog;
	struct bpf_map *prog_array, *data_map;
	int fentry_prog_fd;
	char buff[128] = {};

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (!ASSERT_OK(err, "load obj"))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (!ASSERT_OK_PTR(prog, "find entry prog"))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
		goto out;

	if (test_fentry_entry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_hierarchy_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		fentry_prog = bpf_object__find_program_by_name(fentry_obj,
							       "fentry");
		if (!ASSERT_OK_PTR(fentry_prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(fentry_prog, prog_fd,
						     "entry");
		if (!ASSERT_OK(err, "set_attach_target entry"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(fentry_prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;

		fentry_prog_fd = bpf_program__fd(fentry_prog);
		if (!ASSERT_GE(fentry_prog_fd, 0, "fentry_prog_fd"))
			goto out;

		prog_array = bpf_object__find_map_by_name(fentry_obj, "jmp_table");
		if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
			goto out;

		map_fd = bpf_map__fd(prog_array);
		if (!ASSERT_GE(map_fd, 0, "map_fd"))
			goto out;

		i = 0;
		err = bpf_map_update_elem(map_fd, &i, &fentry_prog_fd, BPF_ANY);
		if (!ASSERT_OK(err, "update jmp_table"))
			goto out;

		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find data_map"))
			goto out;

	} else {
		prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
		if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
			goto out;

		map_fd = bpf_map__fd(prog_array);
		if (!ASSERT_GE(map_fd, 0, "map_fd"))
			goto out;

		i = 0;
		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (!ASSERT_OK(err, "update jmp_table"))
			goto out;

		data_map = bpf_object__find_map_by_name(obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find data_map"))
			goto out;
	}

	if (test_fentry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	if (test_fexit) {
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	main_data_fd = bpf_map__fd(data_map);
	if (!ASSERT_GE(main_data_fd, 0, "main_data_fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(main_data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 34, "tailcall count");

	if (test_fentry) {
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		fentry_data_fd = bpf_map__fd(data_map);
		if (!ASSERT_GE(fentry_data_fd, 0,
				  "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 68, "fentry count");
	}

	if (test_fexit) {
		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fexit.bss map"))
			goto out;

		fexit_data_fd = bpf_map__fd(data_map);
		if (!ASSERT_GE(fexit_data_fd, 0,
				  "find tailcall_bpf2bpf_fexit.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(fexit_data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 68, "fexit count");
	}

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
		goto out;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(main_data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 35, "tailcall count");

	if (test_fentry) {
		i = 0;
		err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 70, "fentry count");
	}

	if (test_fexit) {
		i = 0;
		err = bpf_map_lookup_elem(fexit_data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 70, "fexit count");
	}

out:
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(fexit_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(fexit_obj);
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_hierarchy_1 checks that the count value of the tail
 * call limit enforcement matches expectations when tail calls are preceded
 * by two bpf2bpf calls.
 *
 *         subprog --tailcall-> entry
 * entry <
 *         subprog --tailcall-> entry
 */
static void test_tailcall_bpf2bpf_hierarchy_1(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      false, false, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fentry checks that the count value of the
 * tail call limit enforcement matches expectations when tail calls are
 * preceded by two bpf2bpf calls, and the two subprogs are traced by fentry.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      true, false, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fexit checks that the count value of the
 * tail call limit enforcement matches expectations when tail calls are
 * preceded by two bpf2bpf calls, and the two subprogs are traced by fexit.
 */
static void test_tailcall_bpf2bpf_hierarchy_fexit(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      false, true, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fentry_fexit checks that the count value of
 * the tail call limit enforcement matches expectations when tail calls are
 * preceded by two bpf2bpf calls, and the two subprogs are traced by both
 * fentry and fexit.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry_fexit(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      true, true, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fentry_entry checks that the count value of
 * the tail call limit enforcement matches expectations when tail calls are
 * preceded by two bpf2bpf calls in fentry.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry_entry(void)
{
	test_tailcall_hierarchy_count("tc_dummy.bpf.o", false, false, true);
}

/* test_tailcall_bpf2bpf_hierarchy_2 checks that the count value of the tail
 * call limit enforcement matches expectations:
 *
 *         subprog_tail0 --tailcall-> classifier_0 -> subprog_tail0
 * entry <
 *         subprog_tail1 --tailcall-> classifier_1 -> subprog_tail1
 */
static void test_tailcall_bpf2bpf_hierarchy_2(void)
{
	RUN_TESTS(tailcall_bpf2bpf_hierarchy2);
}

/* test_tailcall_bpf2bpf_hierarchy_3 checks that the count value of the tail
 * call limit enforcement matches expectations:
 *
 *                                   subprog with jmp_table0 to classifier_0
 * entry --tailcall-> classifier_0 <
 *                                   subprog with jmp_table1 to classifier_0
 */
static void test_tailcall_bpf2bpf_hierarchy_3(void)
{
	RUN_TESTS(tailcall_bpf2bpf_hierarchy3);
}

/* test_tailcall_freplace checks that an attached freplace program can
 * update the prog_array map.
 */
static void test_tailcall_freplace(void)
{
	struct tailcall_freplace *freplace_skel = NULL;
	struct bpf_link *freplace_link = NULL;
	struct bpf_program *freplace_prog;
	struct tc_bpf2bpf *tc_skel = NULL;
	int prog_fd, map_fd;
	char buff[128] = {};
	int err, key;

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	freplace_skel = tailcall_freplace__open();
	if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
		return;

	tc_skel = tc_bpf2bpf__open_and_load();
	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
		goto out;

	prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
	freplace_prog = freplace_skel->progs.entry_freplace;
	err = bpf_program__set_attach_target(freplace_prog, prog_fd, "subprog");
	if (!ASSERT_OK(err, "set_attach_target"))
		goto out;

	err = tailcall_freplace__load(freplace_skel);
	if (!ASSERT_OK(err, "tailcall_freplace__load"))
		goto out;

	freplace_link = bpf_program__attach_freplace(freplace_prog, prog_fd,
						     "subprog");
	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
		goto out;

	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
	prog_fd = bpf_program__fd(freplace_prog);
	key = 0;
	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run");
	ASSERT_EQ(topts.retval, 34, "test_run retval");

out:
	bpf_link__destroy(freplace_link);
	tc_bpf2bpf__destroy(tc_skel);
	tailcall_freplace__destroy(freplace_skel);
}

void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
	if (test__start_subtest("tailcall_bpf2bpf_fentry"))
		test_tailcall_bpf2bpf_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_fexit"))
		test_tailcall_bpf2bpf_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit"))
		test_tailcall_bpf2bpf_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
		test_tailcall_bpf2bpf_fentry_entry();
	if (test__start_subtest("tailcall_poke"))
		test_tailcall_poke();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_1"))
		test_tailcall_bpf2bpf_hierarchy_1();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry"))
		test_tailcall_bpf2bpf_hierarchy_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fexit"))
		test_tailcall_bpf2bpf_hierarchy_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_fexit"))
		test_tailcall_bpf2bpf_hierarchy_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_entry"))
		test_tailcall_bpf2bpf_hierarchy_fentry_entry();
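	/* hierarchy_2 and hierarchy_3 use RUN_TESTS, which drives its own
	 * subtests, so they are not wrapped in test__start_subtest() here.
	 */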
	test_tailcall_bpf2bpf_hierarchy_2();
	test_tailcall_bpf2bpf_hierarchy_3();
	if (test__start_subtest("tailcall_freplace"))
		test_tailcall_freplace();
}