// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
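/* For reference, the companion object (tailcall1.bpf.o) is assumed to be
 * built from a source of roughly this shape: a three-entry
 * BPF_MAP_TYPE_PROG_ARRAY named jmp_table plus classifier programs that
 * simply return their own index, e.g.:
 *
 *	SEC("tc")
 *	int classifier_0(struct __sk_buff *skb)
 *	{
 *		return 0;
 *	}
 *
 * Hence in the asserts below retval == i means that slot i was taken,
 * while retval == 3 means every tail call missed and the entry program
 * fell through to its default return value.
 */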
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

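	/* With the jmp_table emptied by the loop above, every tail call
	 * misses and the entry program falls through to its default
	 * return value of 3.
	 */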
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, j, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

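	/* Deleting an already-empty slot must fail with ENOENT, and the
	 * (nop->nop) rewritten call sites must keep falling through to 3.
	 */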
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}

out:
	bpf_object__close(obj);
}

/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
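/* The programs in tailcall2.bpf.o are assumed to be chained, with
 * classifier_0 tail-calling into slot 1 and classifier_1 into slot 2.
 * That is why a full table returns 2 (the end of the chain), deleting
 * slot 2 returns 1 (classifier_1 misses and returns), and deleting
 * slot 0 as well makes the entry program fall through to 3.
 */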
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 2, "tailcall retval");

	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
	bpf_object__close(obj);
}

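/* test_tailcall_count loads @which, wires classifier_0 into slot 0 of the
 * jmp_table so that the program keeps tail-calling itself, and checks that
 * the global counter in .bss stops exactly at the tail call limit
 * (MAX_TAIL_CALL_CNT, i.e. 33). Optionally, fentry and/or fexit programs
 * are attached to subprog_tail to verify that tracing a bpf2bpf call does
 * not disturb the tail call counter.
 */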
static void test_tailcall_count(const char *which, bool test_fentry,
				bool test_fexit)
{
	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	if (test_fentry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	if (test_fexit) {
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	if (test_fentry) {
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 33, "fentry count");
	}

	if (test_fexit) {
		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fexit.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fexit.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 33, "fexit count");
	}

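	/* With classifier_0 removed from slot 0 the tail call misses and
	 * the entry program returns 0.
	 */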
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(fexit_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(fexit_obj);
	bpf_object__close(obj);
}

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses a direct jump.
 */
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.bpf.o", false, false);
}

/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses an indirect jump.
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.bpf.o", false, false);
}

/* test_tailcall_4 checks that the kernel properly selects an indirect jump
 * for the case where the key is not known at JIT time. The key is passed
 * in via global data so that different targets can be selected and their
 * return values compared.
 */
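/* The entry program of tailcall4.bpf.o is assumed to read the key from a
 * global selector (written by the test through tailcall.bss) and pass it
 * to the tail call as a non-constant value, roughly:
 *
 *	int selector;
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, selector);
 *		return 3;
 *	}
 */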
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_5 probes, similarly to test_tailcall_4, that the kernel
 * generates an indirect jump when the keys are constant but differ across
 * branches.
 */
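/* Here the selector values (1111, 1234 and 5678 in key[] below) are
 * assumed to be tested in separate branches, each of which tail-calls
 * with its own constant key, roughly:
 *
 *	if (selector == 1111)
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *	if (selector == 1234)
 *		bpf_tail_call_static(skb, &jmp_table, 1);
 *	if (selector == 5678)
 *		bpf_tail_call_static(skb, &jmp_table, 2);
 */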
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 makes sure that tail calls work correctly in
 * combination with BPF subprograms.
 */
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* make sure that subprog can access ctx and entry prog that
	 * called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call
 * limit enforcement matches expectations when the tail call is preceded
 * by a bpf2bpf call.
 */
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within BPF subprograms that contain the tail
 * calls.
 */
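/* A minimal sketch of the assumed subprogram shape: a large stack buffer
 * is kept live around the tail call so that the JIT has to account for it
 * when laying out the frame, roughly:
 *
 *	__noinline int subprog_tail(struct __sk_buff *skb)
 *	{
 *		volatile char arr[128] = {};
 *
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return skb->len * 2;
 *	}
 */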
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf4.skel.h"

/* test_tailcall_bpf2bpf_4 checks that the tailcall counter is correctly
 * preserved across tail calls combined with bpf2bpf calls. To make sure
 * that the tailcall counter behaves correctly, the BPF program goes
 * through the following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tailcalls and start counting from subprog2,
 * where the loop begins. At the end of the test, make sure that the global
 * counter is equal to 31: the tailcall counter includes the first two
 * tailcalls, whereas the global counter is incremented only in the loop
 * shown above (33 tail calls in total, minus the 2 before the loop, gives
 * 31 iterations).
 *
 * The noise parameter is used to insert bpf_map_update calls into the
 * logic to force the verifier to patch instructions. This allows us to
 * ensure that the jump logic remains correct with instruction movement.
 */
static void test_tailcall_bpf2bpf_4(bool noise)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct tailcall_bpf2bpf4__bss val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	val.noise = noise;
	val.count = 0;
	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val.count, 31, "tailcall count");

out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf6.skel.h"

/* Tail call counting works even when there is data on the stack which is
 * not aligned to 8 bytes.
 */
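/* A sketch of the assumed BPF side: an odd-sized buffer is kept live on
 * the stack across the tail call, so the stack frame is not a multiple of
 * 8 bytes, e.g.:
 *
 *	char buf[5] = {};
 *
 *	bpf_skb_load_bytes(skb, 0, buf, sizeof(buf));
 *	bpf_tail_call_static(skb, &jmp_table, 0);
 */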
static void test_tailcall_bpf2bpf_6(void)
{
	struct tailcall_bpf2bpf6 *obj;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	obj = tailcall_bpf2bpf6__open_and_load();
	if (!ASSERT_OK_PTR(obj, "open and load"))
		return;

	main_fd = bpf_program__fd(obj->progs.entry);
	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
		goto out;

	map_fd = bpf_map__fd(obj->maps.jmp_table);
	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
		goto out;

	prog_fd = bpf_program__fd(obj->progs.classifier_0);
	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "jmp_table map update"))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "entry prog test run");
	ASSERT_EQ(topts.retval, 0, "tailcall retval");

	data_fd = bpf_map__fd(obj->maps.bss);
	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "bss map lookup");
	ASSERT_EQ(val, 1, "done flag is set");

out:
	tailcall_bpf2bpf6__destroy(obj);
}

/* test_tailcall_bpf2bpf_fentry checks that the count value of the tail call
 * limit enforcement matches expectations when the tail call is preceded by
 * a bpf2bpf call, and the bpf2bpf call is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false);
}

/* test_tailcall_bpf2bpf_fexit checks that the count value of the tail call
 * limit enforcement matches expectations when the tail call is preceded by
 * a bpf2bpf call, and the bpf2bpf call is traced by fexit.
 */
static void test_tailcall_bpf2bpf_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true);
}

/* test_tailcall_bpf2bpf_fentry_fexit checks that the count value of the
 * tail call limit enforcement matches expectations when the tail call is
 * preceded by a bpf2bpf call, and the bpf2bpf call is traced by both fentry
 * and fexit.
 */
static void test_tailcall_bpf2bpf_fentry_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true);
}

/* test_tailcall_bpf2bpf_fentry_entry checks that the count value of the
 * tail call limit enforcement matches expectations when the tail call is
 * preceded by a bpf2bpf call, and the bpf2bpf caller is traced by fentry.
 */
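/* A note on the expected counts below: classifier_0 is run directly via
 * bpf_prog_test_run_opts() and then tail-calls itself through slot 0, so
 * its counter is bumped 34 times (1 direct entry plus 33 tail calls),
 * while the fentry program is assumed to fire only on the initial entry,
 * not on the tail calls, leaving its counter at 1.
 */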
static void test_tailcall_bpf2bpf_fentry_entry(void)
{
	struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL;
	int err, map_fd, prog_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_link *fentry_link = NULL;
	struct bpf_program *prog;
	char buff[128] = {};

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o",
				 BPF_PROG_TYPE_SCHED_CLS,
				 &tgt_obj, &prog_fd);
	if (!ASSERT_OK(err, "load tgt_obj"))
		return;

	prog_array = bpf_object__find_map_by_name(tgt_obj, "jmp_table");
	if (!ASSERT_OK_PTR(prog_array, "find jmp_table map"))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (!ASSERT_FALSE(map_fd < 0, "find jmp_table map fd"))
		goto out;

	prog = bpf_object__find_program_by_name(tgt_obj, "classifier_0");
	if (!ASSERT_OK_PTR(prog, "find classifier_0 prog"))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_FALSE(prog_fd < 0, "find classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
					   NULL);
	if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
		goto out;

	prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
	if (!ASSERT_OK_PTR(prog, "find fentry prog"))
		goto out;

	err = bpf_program__set_attach_target(prog, prog_fd, "classifier_0");
	if (!ASSERT_OK(err, "set_attach_target classifier_0"))
		goto out;

	err = bpf_object__load(fentry_obj);
	if (!ASSERT_OK(err, "load fentry_obj"))
		goto out;

	fentry_link = bpf_program__attach_trace(prog);
	if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
		goto out;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(tgt_obj, "tailcall.bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0, "find tailcall.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 34, "tailcall count");

	data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall_bpf2bpf_fentry.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0,
			  "find tailcall_bpf2bpf_fentry.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "fentry count");
	ASSERT_EQ(val, 1, "fentry count");

out:
	bpf_link__destroy(fentry_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(tgt_obj);
}

void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
	if (test__start_subtest("tailcall_bpf2bpf_fentry"))
		test_tailcall_bpf2bpf_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_fexit"))
		test_tailcall_bpf2bpf_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit"))
		test_tailcall_bpf2bpf_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
		test_tailcall_bpf2bpf_fentry_entry();
}