// SPDX-License-Identifier: GPL-2.0

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"
#include <stdbool.h>
#include "bpf_kfuncs.h"

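/* Overview (editor's summary, based on the tests below): each test builds a
 * bpf_fastcall candidate pattern, i.e. spills of the caller-saved registers
 * r1-r5 to the stack immediately before a call to a fastcall-eligible helper
 * or kfunc (e.g. bpf_get_smp_processor_id) and matching fills immediately
 * after it. The __xlated()/__msg() annotations check whether the verifier
 * removed such spill/fill pairs (pattern accepted) or kept them (pattern
 * rejected).
 */
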
SEC("raw_tp")
__arch_x86_64
__log_level(4) __msg("stack depth 8")
__xlated("4: r5 = 5")
__xlated("5: w0 = ")
__xlated("6: r0 = &(void __percpu *)(r0)")
__xlated("7: r0 = *(u32 *)(r0 +0)")
__xlated("8: exit")
__success
__naked void simple(void)
{
	asm volatile (
	"r1 = 1;"
	"r2 = 2;"
	"r3 = 3;"
	"r4 = 4;"
	"r5 = 5;"
	"*(u64 *)(r10 - 16) = r1;"
	"*(u64 *)(r10 - 24) = r2;"
	"*(u64 *)(r10 - 32) = r3;"
	"*(u64 *)(r10 - 40) = r4;"
	"*(u64 *)(r10 - 48) = r5;"
	"call %[bpf_get_smp_processor_id];"
	"r5 = *(u64 *)(r10 - 48);"
	"r4 = *(u64 *)(r10 - 40);"
	"r3 = *(u64 *)(r10 - 32);"
	"r2 = *(u64 *)(r10 - 24);"
	"r1 = *(u64 *)(r10 - 16);"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

/* The logic for detecting and verifying the bpf_fastcall pattern is the same
 * for any arch, however x86 differs from arm64 and riscv64 in the way
 * bpf_get_smp_processor_id is rewritten:
 * - on x86 it is done by the verifier
 * - on arm64 and riscv64 it is done by the jit
 *
 * This leads to different xlated patterns for different archs:
 * - on x86 the call is expanded into 3 instructions
 * - on arm64 and riscv64 the call remains as is
 *   (but spills/fills are still removed)
 *
 * It is desirable to check instruction indexes in the xlated patterns,
 * so add this canary test to check that a function rewrite done by the
 * jit is correctly processed by the bpf_fastcall logic, and keep the rest
 * of the tests x86-specific.
 */
SEC("raw_tp")
__arch_arm64
__arch_riscv64
__xlated("0: r1 = 1")
__xlated("1: call bpf_get_smp_processor_id")
__xlated("2: exit")
__success
__naked void canary_arm64_riscv64(void)
{
	asm volatile (
	"r1 = 1;"
	"*(u64 *)(r10 - 16) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 16);"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__xlated("1: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("3: exit")
__success
__naked void canary_zero_spills(void)
{
	asm volatile (
	"call %[bpf_get_smp_processor_id];"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

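/* Editor's note: the wrong_* tests below construct near-miss variants of the
 * pattern (wrong register, wrong base register, an extra instruction inside
 * the pattern, mismatched offset or access size); their __xlated()
 * annotations check that the spills/fills are kept, i.e. that the verifier
 * does not apply the bpf_fastcall rewrite for them.
 */
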
SEC("raw_tp")
__arch_x86_64
__log_level(4) __msg("stack depth 16")
__xlated("1: *(u64 *)(r10 -16) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r2 = *(u64 *)(r10 -16)")
__success
__naked void wrong_reg_in_pattern1(void)
{
	asm volatile (
	"r1 = 1;"
	"*(u64 *)(r10 - 16) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r2 = *(u64 *)(r10 - 16);"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -16) = r6")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r6 = *(u64 *)(r10 -16)")
__success
__naked void wrong_reg_in_pattern2(void)
{
	asm volatile (
	"r6 = 1;"
	"*(u64 *)(r10 - 16) = r6;"
	"call %[bpf_get_smp_processor_id];"
	"r6 = *(u64 *)(r10 - 16);"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -16) = r0")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r0 = *(u64 *)(r10 -16)")
__success
__naked void wrong_reg_in_pattern3(void)
{
	asm volatile (
	"r0 = 1;"
	"*(u64 *)(r10 - 16) = r0;"
	"call %[bpf_get_smp_processor_id];"
	"r0 = *(u64 *)(r10 - 16);"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__xlated("2: *(u64 *)(r2 -16) = r1")
__xlated("...")
__xlated("4: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("6: r1 = *(u64 *)(r10 -16)")
__success
__naked void wrong_base_in_pattern(void)
{
	asm volatile (
	"r1 = 1;"
	"r2 = r10;"
	"*(u64 *)(r2 - 16) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 16);"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -16) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r2 = 1")
__success
__naked void wrong_insn_in_pattern(void)
{
	asm volatile (
	"r1 = 1;"
	"*(u64 *)(r10 - 16) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r2 = 1;"
	"r1 = *(u64 *)(r10 - 16);"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__xlated("2: *(u64 *)(r10 -16) = r1")
__xlated("...")
__xlated("4: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("6: r1 = *(u64 *)(r10 -8)")
__success
__naked void wrong_off_in_pattern1(void)
{
	asm volatile (
	"r1 = 1;"
	"*(u64 *)(r10 - 8) = r1;"
	"*(u64 *)(r10 - 16) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 8);"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u32 *)(r10 -4) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u32 *)(r10 -4)")
__success
__naked void wrong_off_in_pattern2(void)
{
	asm volatile (
	"r1 = 1;"
	"*(u32 *)(r10 - 4) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u32 *)(r10 - 4);"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u32 *)(r10 -16) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u32 *)(r10 -16)")
__success
__naked void wrong_size_in_pattern(void)
{
	asm volatile (
	"r1 = 1;"
	"*(u32 *)(r10 - 16) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u32 *)(r10 - 16);"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__xlated("2: *(u32 *)(r10 -8) = r1")
__xlated("...")
__xlated("4: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("6: r1 = *(u32 *)(r10 -8)")
__success
__naked void partial_pattern(void)
{
	asm volatile (
	"r1 = 1;"
	"r2 = 2;"
	"*(u32 *)(r10 - 8) = r1;"
	"*(u64 *)(r10 - 16) = r2;"
	"call %[bpf_get_smp_processor_id];"
	"r2 = *(u64 *)(r10 - 16);"
	"r1 = *(u32 *)(r10 - 8);"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__xlated("0: r1 = 1")
__xlated("1: r2 = 2")
/* not patched, spills for -8, -16 not removed */
__xlated("2: *(u64 *)(r10 -8) = r1")
__xlated("3: *(u64 *)(r10 -16) = r2")
__xlated("...")
__xlated("5: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("7: r2 = *(u64 *)(r10 -16)")
__xlated("8: r1 = *(u64 *)(r10 -8)")
/* patched, spills for -24, -32 removed */
__xlated("...")
__xlated("10: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("12: exit")
__success
__naked void min_stack_offset(void)
{
	asm volatile (
	"r1 = 1;"
	"r2 = 2;"
	/* this call won't be patched */
	"*(u64 *)(r10 - 8) = r1;"
	"*(u64 *)(r10 - 16) = r2;"
	"call %[bpf_get_smp_processor_id];"
	"r2 = *(u64 *)(r10 - 16);"
	"r1 = *(u64 *)(r10 - 8);"
	/* this call will be patched */
	"*(u64 *)(r10 - 24) = r1;"
	"*(u64 *)(r10 - 32) = r2;"
	"call %[bpf_get_smp_processor_id];"
	"r2 = *(u64 *)(r10 - 32);"
	"r1 = *(u64 *)(r10 - 24);"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -8) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)")
__success
__naked void bad_fixed_read(void)
{
	asm volatile (
	"r1 = 1;"
	"*(u64 *)(r10 - 8) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 8);"
	"r1 = r10;"
	"r1 += -8;"
	"r1 = *(u64 *)(r1 - 0);"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -8) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)")
__success
__naked void bad_fixed_write(void)
{
	asm volatile (
	"r1 = 1;"
	"*(u64 *)(r10 - 8) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 8);"
	"r1 = r10;"
	"r1 += -8;"
	"*(u64 *)(r1 - 0) = r1;"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__xlated("6: *(u64 *)(r10 -16) = r1")
__xlated("...")
__xlated("8: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("10: r1 = *(u64 *)(r10 -16)")
__success
__naked void bad_varying_read(void)
{
	asm volatile (
	"r6 = *(u64 *)(r1 + 0);" /* random scalar value */
	"r6 &= 0x7;"		 /* r6 range [0..7] */
	"r6 += 0x2;"		 /* r6 range [2..9] */
	"r7 = 0;"
	"r7 -= r6;"		 /* r7 range [-9..-2] */
	"r1 = 1;"
	"*(u64 *)(r10 - 16) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 16);"
	"r1 = r10;"
	"r1 += r7;"
	"r1 = *(u8 *)(r1 - 0);" /* touches slot [-16..-9] where spills are stored */
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__xlated("6: *(u64 *)(r10 -16) = r1")
__xlated("...")
__xlated("8: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("10: r1 = *(u64 *)(r10 -16)")
__success
__naked void bad_varying_write(void)
{
	asm volatile (
	"r6 = *(u64 *)(r1 + 0);" /* random scalar value */
	"r6 &= 0x7;"		 /* r6 range [0..7] */
	"r6 += 0x2;"		 /* r6 range [2..9] */
	"r7 = 0;"
	"r7 -= r6;"		 /* r7 range [-9..-2] */
	"r1 = 1;"
	"*(u64 *)(r10 - 16) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 16);"
	"r1 = r10;"
	"r1 += r7;"
	"*(u8 *)(r1 - 0) = r7;" /* touches slot [-16..-9] where spills are stored */
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -8) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)")
__success
__naked void bad_write_in_subprog(void)
{
	asm volatile (
	"r1 = 1;"
	"*(u64 *)(r10 - 8) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 8);"
	"r1 = r10;"
	"r1 += -8;"
	"call bad_write_in_subprog_aux;"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

__used
__naked static void bad_write_in_subprog_aux(void)
{
	asm volatile (
	"r0 = 1;"
	"*(u64 *)(r1 - 0) = r0;"	/* invalidates bpf_fastcall contract for caller: */
	"exit;"				/* caller stack at -8 used outside of the pattern */
	::: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -8) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)")
__success
__naked void bad_helper_write(void)
{
	asm volatile (
	"r1 = 1;"
	/* bpf_fastcall pattern with stack offset -8 */
	"*(u64 *)(r10 - 8) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 8);"
	"r1 = r10;"
	"r1 += -8;"
	"r2 = 1;"
	"r3 = 42;"
	/* read dst is fp[-8], thus bpf_fastcall rewrite not applied */
	"call %[bpf_probe_read_kernel];"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id),
	  __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
/* main, not patched */
__xlated("1: *(u64 *)(r10 -8) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)")
__xlated("...")
__xlated("9: call pc+1")
__xlated("...")
__xlated("10: exit")
/* subprogram, patched */
__xlated("11: r1 = 1")
__xlated("...")
__xlated("13: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("15: exit")
__success
__naked void invalidate_one_subprog(void)
{
	asm volatile (
	"r1 = 1;"
	"*(u64 *)(r10 - 8) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 8);"
	"r1 = r10;"
	"r1 += -8;"
	"r1 = *(u64 *)(r1 - 0);"
	"call invalidate_one_subprog_aux;"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

__used
__naked static void invalidate_one_subprog_aux(void)
{
	asm volatile (
	"r1 = 1;"
	"*(u64 *)(r10 - 8) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 8);"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
/* main */
__xlated("0: r1 = 1")
__xlated("...")
__xlated("2: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("4: call pc+1")
__xlated("5: exit")
/* subprogram */
__xlated("6: r1 = 1")
__xlated("...")
__xlated("8: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("10: *(u64 *)(r10 -16) = r1")
__xlated("11: exit")
__success
__naked void subprogs_use_independent_offsets(void)
{
	asm volatile (
	"r1 = 1;"
	"*(u64 *)(r10 - 16) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 16);"
	"call subprogs_use_independent_offsets_aux;"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

__used
__naked static void subprogs_use_independent_offsets_aux(void)
{
	asm volatile (
	"r1 = 1;"
	"*(u64 *)(r10 - 24) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 24);"
	"*(u64 *)(r10 - 16) = r1;"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__log_level(4) __msg("stack depth 8")
__xlated("2: r0 = &(void __percpu *)(r0)")
__success
__naked void helper_call_does_not_prevent_bpf_fastcall(void)
{
	asm volatile (
	"r1 = 1;"
	"*(u64 *)(r10 - 8) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 8);"
	"*(u64 *)(r10 - 8) = r1;"
	"call %[bpf_get_prandom_u32];"
	"r1 = *(u64 *)(r10 - 8);"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id),
	  __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
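
/* Editor's note (interpretation of the expectations above): bpf_get_prandom_u32
 * is presumably not bpf_fastcall-eligible, so the second spill/fill pair and
 * its slot at -8 must stay (hence "stack depth 8"), while the pattern around
 * bpf_get_smp_processor_id is still rewritten (hence the per-cpu dereference
 * already at index 2).
 */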

SEC("raw_tp")
__arch_x86_64
__log_level(4) __msg("stack depth 16")
/* may_goto counter at -16 */
__xlated("0: *(u64 *)(r10 -16) =")
__xlated("1: r1 = 1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
/* may_goto expansion starts */
__xlated("5: r11 = *(u64 *)(r10 -16)")
__xlated("6: if r11 == 0x0 goto pc+3")
__xlated("7: r11 -= 1")
__xlated("8: *(u64 *)(r10 -16) = r11")
/* may_goto expansion ends */
__xlated("9: *(u64 *)(r10 -8) = r1")
__xlated("10: exit")
__success
__naked void may_goto_interaction(void)
{
	asm volatile (
	"r1 = 1;"
	"*(u64 *)(r10 - 16) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 16);"
	".8byte %[may_goto];"
	/* just touch some stack at -8 */
	"*(u64 *)(r10 - 8) = r1;"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id),
	  __imm_insn(may_goto, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, +1 /* offset */, 0))
	: __clobber_all);
}

__used
__naked static void dummy_loop_callback(void)
{
	asm volatile (
	"r0 = 0;"
	"exit;"
	::: __clobber_all);
}

SEC("raw_tp")
__arch_x86_64
__log_level(4) __msg("stack depth 32+0")
__xlated("2: r1 = 1")
__xlated("3: w0 =")
__xlated("4: r0 = &(void __percpu *)(r0)")
__xlated("5: r0 = *(u32 *)(r0 +0)")
/* bpf_loop params setup */
__xlated("6: r2 =")
__xlated("7: r3 = 0")
__xlated("8: r4 = 0")
__xlated("...")
/* ... part of the inlined bpf_loop */
__xlated("12: *(u64 *)(r10 -32) = r6")
__xlated("13: *(u64 *)(r10 -24) = r7")
__xlated("14: *(u64 *)(r10 -16) = r8")
__xlated("...")
__xlated("21: call pc+8") /* dummy_loop_callback */
/* ... last insns of the bpf_loop_interaction1 */
__xlated("...")
__xlated("28: r0 = 0")
__xlated("29: exit")
/* dummy_loop_callback */
__xlated("30: r0 = 0")
__xlated("31: exit")
__success
__naked int bpf_loop_interaction1(void)
{
	asm volatile (
	"r1 = 1;"
	/* bpf_fastcall stack region at -16, but could be removed */
	"*(u64 *)(r10 - 16) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 16);"
	"r2 = %[dummy_loop_callback];"
	"r3 = 0;"
	"r4 = 0;"
	"call %[bpf_loop];"
	"r0 = 0;"
	"exit;"
	:
	: __imm_ptr(dummy_loop_callback),
	  __imm(bpf_get_smp_processor_id),
	  __imm(bpf_loop)
	: __clobber_common
	);
}

SEC("raw_tp")
__arch_x86_64
__log_level(4) __msg("stack depth 40+0")
/* call bpf_get_smp_processor_id */
__xlated("2: r1 = 42")
__xlated("3: w0 =")
__xlated("4: r0 = &(void __percpu *)(r0)")
__xlated("5: r0 = *(u32 *)(r0 +0)")
/* call bpf_get_prandom_u32 */
__xlated("6: *(u64 *)(r10 -16) = r1")
__xlated("7: call")
__xlated("8: r1 = *(u64 *)(r10 -16)")
__xlated("...")
/* ... part of the inlined bpf_loop */
__xlated("15: *(u64 *)(r10 -40) = r6")
__xlated("16: *(u64 *)(r10 -32) = r7")
__xlated("17: *(u64 *)(r10 -24) = r8")
__success
__naked int bpf_loop_interaction2(void)
{
	asm volatile (
	"r1 = 42;"
	/* bpf_fastcall stack region at -16, cannot be removed */
	"*(u64 *)(r10 - 16) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 16);"
	"*(u64 *)(r10 - 16) = r1;"
	"call %[bpf_get_prandom_u32];"
	"r1 = *(u64 *)(r10 - 16);"
	"r2 = %[dummy_loop_callback];"
	"r3 = 0;"
	"r4 = 0;"
	"call %[bpf_loop];"
	"r0 = 0;"
	"exit;"
	:
	: __imm_ptr(dummy_loop_callback),
	  __imm(bpf_get_smp_processor_id),
	  __imm(bpf_get_prandom_u32),
	  __imm(bpf_loop)
	: __clobber_common
	);
}

SEC("raw_tp")
__arch_x86_64
__log_level(4)
__msg("stack depth 512+0")
/* just to print xlated version when debugging */
__xlated("r0 = &(void __percpu *)(r0)")
__success
/* cumulative_stack_depth() stack usage is MAX_BPF_STACK,
 * the called subprogram uses an additional slot for the bpf_fastcall
 * spill/fill; since the bpf_fastcall spill/fill can be removed, the program
 * still fits in MAX_BPF_STACK and should be accepted.
 */
__naked int cumulative_stack_depth(void)
{
	asm volatile(
	"r1 = 42;"
	"*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
	"call cumulative_stack_depth_subprog;"
	"exit;"
	:
	: __imm_const(max_bpf_stack, MAX_BPF_STACK)
	: __clobber_all
	);
}

__used
__naked static void cumulative_stack_depth_subprog(void)
{
	asm volatile (
	"*(u64 *)(r10 - 8) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 8);"
	"exit;"
	:: __imm(bpf_get_smp_processor_id) : __clobber_all);
}

SEC("cgroup/getsockname_unix")
__xlated("0: r2 = 1")
/* bpf_cast_to_kern_ctx is replaced by a single assignment */
__xlated("1: r0 = r1")
__xlated("2: r0 = r2")
__xlated("3: exit")
__success
__naked void kfunc_bpf_cast_to_kern_ctx(void)
{
	asm volatile (
	"r2 = 1;"
	"*(u64 *)(r10 - 32) = r2;"
	"call %[bpf_cast_to_kern_ctx];"
	"r2 = *(u64 *)(r10 - 32);"
	"r0 = r2;"
	"exit;"
	:
	: __imm(bpf_cast_to_kern_ctx)
	: __clobber_all);
}

SEC("raw_tp")
__xlated("3: r3 = 1")
/* bpf_rdonly_cast is replaced by a single assignment */
__xlated("4: r0 = r1")
__xlated("5: r0 = r3")
void kfunc_bpf_rdonly_cast(void)
{
	asm volatile (
	"r2 = %[btf_id];"
	"r3 = 1;"
	"*(u64 *)(r10 - 32) = r3;"
	"call %[bpf_rdonly_cast];"
	"r3 = *(u64 *)(r10 - 32);"
	"r0 = r3;"
	:
	: __imm(bpf_rdonly_cast),
	 [btf_id]"r"(bpf_core_type_id_kernel(union bpf_attr))
	: __clobber_common);
}

/* BTF FUNC records are not generated for kfuncs referenced
 * from inline assembly. These records are necessary for
 * libbpf to link the program. The function below is a hack
 * to ensure that BTF FUNC records are generated.
 */
void kfunc_root(void)
{
	bpf_cast_to_kern_ctx(0);
	bpf_rdonly_cast(0, 0);
}

char _license[] SEC("license") = "GPL";