xref: /linux/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c (revision 753c8608f3e579307493a63b9242667aee35a751)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
3 
4 #include <errno.h>
5 #include <string.h>
6 #include <linux/bpf.h>
7 #include <bpf/bpf_helpers.h>
8 #include "bpf_misc.h"
9 
10 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
11 
12 int vals[] SEC(".data.vals") = {1, 2, 3, 4};
13 
/* Identity helper written as naked asm so the verifier sees exactly the
 * two insns below (r0 = r1; exit) and the expected mark_precise log lines
 * in the tests that call it stay stable.
 */
14 __naked __noinline __used
15 static unsigned long identity_subprog()
16 {
17 	/* the simplest *static* 64-bit identity function */
18 	asm volatile (
19 		"r0 = r1;"
20 		"exit;"
21 	);
22 }
23 
/* Global (non-static) identity subprog. Tests below rely on the fact that
 * precision marks do not propagate back through a global subprog call
 * (see global_subprog_result_precise).
 */
24 __noinline __used
25 unsigned long global_identity_subprog(__u64 x)
26 {
27 	/* the simplest *global* 64-bit identity function */
28 	return x;
29 }
30 
/* Minimal callback body (r0 = 0; exit) passed to bpf_loop() by the
 * *_with_callback tests below; kept naked so its two insns and their
 * indices in the verifier log are deterministic.
 */
31 __naked __noinline __used
32 static unsigned long callback_subprog()
33 {
34 	/* the simplest callback function */
35 	asm volatile (
36 		"r0 = 0;"
37 		"exit;"
38 	);
39 }
40 
/* Verify that precision backtracking follows a value through a *static*
 * subprog call: using r0 (returned from identity_subprog) as an array
 * index must mark the whole r6 -> r1 -> (frame1) -> r0 chain precise,
 * as asserted by the mark_precise log lines below.
 */
41 SEC("?raw_tp")
42 __success __log_level(2)
43 __msg("7: (0f) r1 += r0")
44 __msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7")
45 __msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4")
46 __msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
47 __msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1")
48 __msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5")
49 __msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
50 __msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
51 __naked int subprog_result_precise(void)
52 {
53 	asm volatile (
54 		"r6 = 3;"
55 		/* pass r6 through r1 into subprog to get it back as r0;
56 		 * this whole chain will have to be marked as precise later
57 		 */
58 		"r1 = r6;"
59 		"call identity_subprog;"
60 		/* now use subprog's returned value (which is a
61 		 * r6 -> r1 -> r0 chain), as index into vals array, forcing
62 		 * all of that to be known precisely
63 		 */
64 		"r0 *= 4;"
65 		"r1 = %[vals];"
66 		/* here r0->r1->r6 chain is forced to be precise and has to be
67 		 * propagated back to the beginning, including through the
68 		 * subprog call
69 		 */
70 		"r1 += r0;"
71 		"r0 = *(u32 *)(r1 + 0);"
72 		"exit;"
73 		:
74 		: __imm_ptr(vals)
75 		: __clobber_common, "r6"
76 	);
77 }
78 
/* Verify that precision backtracking *stops* at a global subprog call:
 * r0 returned from global_identity_subprog is treated as unknown, so only
 * r0 is marked precise (back to the call insn) and r6 stays imprecise.
 */
79 SEC("?raw_tp")
80 __success __log_level(2)
81 __msg("9: (0f) r1 += r0")
82 __msg("mark_precise: frame0: last_idx 9 first_idx 0")
83 __msg("mark_precise: frame0: regs=r0 stack= before 8: (bf) r1 = r7")
84 __msg("mark_precise: frame0: regs=r0 stack= before 7: (27) r0 *= 4")
85 __msg("mark_precise: frame0: regs=r0 stack= before 5: (a5) if r0 < 0x4 goto pc+1")
86 __msg("mark_precise: frame0: regs=r0 stack= before 4: (85) call pc+7")
87 __naked int global_subprog_result_precise(void)
88 {
89 	asm volatile (
90 		"r6 = 3;"
91 		/* pass r6 through r1 into subprog to get it back as r0;
92 		 * given global_identity_subprog is global, precision won't
93 		 * propagate all the way back to r6
94 		 */
95 		"r1 = r6;"
96 		"call global_identity_subprog;"
97 		/* now use subprog's returned value (which is unknown now, so
98 		 * we need to clamp it), as index into vals array, forcing r0
99 		 * to be marked precise (with no effect on r6, though)
100 		 */
101 		"if r0 < %[vals_arr_sz] goto 1f;"
102 		"r0 = %[vals_arr_sz] - 1;"
103 	"1:"
104 		"r0 *= 4;"
105 		"r1 = %[vals];"
106 		/* here r0 is forced to be precise and has to be
107 		 * propagated back to the global subprog call, but it
108 		 * shouldn't go all the way to mark r6 as precise
109 		 */
110 		"r1 += r0;"
111 		"r0 = *(u32 *)(r1 + 0);"
112 		"exit;"
113 		:
114 		: __imm_ptr(vals),
115 		  __imm_const(vals_arr_sz, ARRAY_SIZE(vals))
116 		: __clobber_common, "r6"
117 	);
118 }
119 
/* Verify precision tracking around a bpf_loop() callback: r1/r4 are
 * forced precise for the bpf_loop call itself, while precision on r6
 * (copy of bpf_loop's r0 result) propagates back only to the call insn,
 * not into the callback body.
 */
120 SEC("?raw_tp")
121 __success __log_level(2)
122 /* First simulated path does not include callback body,
123  * r1 and r4 are always precise for bpf_loop() calls.
124  */
125 __msg("9: (85) call bpf_loop#181")
126 __msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
127 __msg("mark_precise: frame0: parent state regs=r4 stack=:")
128 __msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
129 __msg("mark_precise: frame0: regs=r4 stack= before 8: (b7) r4 = 0")
130 __msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
131 __msg("mark_precise: frame0: parent state regs=r1 stack=:")
132 __msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
133 __msg("mark_precise: frame0: regs=r1 stack= before 8: (b7) r4 = 0")
134 __msg("mark_precise: frame0: regs=r1 stack= before 7: (b7) r3 = 0")
135 __msg("mark_precise: frame0: regs=r1 stack= before 6: (bf) r2 = r8")
136 __msg("mark_precise: frame0: regs=r1 stack= before 5: (bf) r1 = r6")
137 __msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
138 /* r6 precision propagation */
139 __msg("14: (0f) r1 += r6")
140 __msg("mark_precise: frame0: last_idx 14 first_idx 9")
141 __msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
142 __msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
143 __msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4")
144 __msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0")
145 __msg("mark_precise: frame0: regs=r0 stack= before 9: (85) call bpf_loop")
146 /* State entering callback body popped from states stack */
147 __msg("from 9 to 17: frame1:")
148 __msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
149 __msg("17: (b7) r0 = 0")
150 __msg("18: (95) exit")
151 __msg("returning from callee:")
152 __msg("to caller at 9:")
153 __msg("frame 0: propagating r1,r4")
154 __msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
155 __msg("mark_precise: frame0: regs=r1,r4 stack= before 18: (95) exit")
156 __msg("from 18 to 9: safe")
157 __naked int callback_result_precise(void)
158 {
159 	asm volatile (
160 		"r6 = 3;"
161 
162 		/* call subprog and use result; r0 shouldn't propagate back to
163 		 * callback_subprog
164 		 */
165 		"r1 = r6;"			/* nr_loops */
166 		"r2 = %[callback_subprog];"	/* callback_fn */
167 		"r3 = 0;"			/* callback_ctx */
168 		"r4 = 0;"			/* flags */
169 		"call %[bpf_loop];"
170 
171 		"r6 = r0;"
172 		"if r6 > 3 goto 1f;"
173 		"r6 *= 4;"
174 		"r1 = %[vals];"
175 		/* here r6 is forced to be precise and has to be propagated
176 		 * back to the bpf_loop() call, but not beyond
177 		 */
178 		"r1 += r6;"
179 		"r0 = *(u32 *)(r1 + 0);"
180 	"1:"
181 		"exit;"
182 		:
183 		: __imm_ptr(vals),
184 		  __imm_ptr(callback_subprog),
185 		  __imm(bpf_loop)
186 		: __clobber_common, "r6"
187 	);
188 }
189 
/* Verify that precision on a callee-saved register (r6) set *before* a
 * static subprog call is propagated back through the call's jump history
 * (the call is traversed but contributes nothing -- regs= is empty in
 * the frame1 log lines) all the way to 'r6 = 3'.
 */
190 SEC("?raw_tp")
191 __success __log_level(2)
192 __msg("7: (0f) r1 += r6")
193 __msg("mark_precise: frame0: last_idx 7 first_idx 0")
194 __msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
195 __msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
196 __msg("mark_precise: frame0: regs=r6 stack= before 11: (95) exit")
197 __msg("mark_precise: frame1: regs= stack= before 10: (bf) r0 = r1")
198 __msg("mark_precise: frame1: regs= stack= before 4: (85) call pc+5")
199 __msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
200 __msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
201 __naked int parent_callee_saved_reg_precise(void)
202 {
203 	asm volatile (
204 		"r6 = 3;"
205 
206 		/* call subprog and ignore result; we need this call only to
207 		 * complicate jump history
208 		 */
209 		"r1 = 0;"
210 		"call identity_subprog;"
211 
212 		"r6 *= 4;"
213 		"r1 = %[vals];"
214 		/* here r6 is forced to be precise and has to be propagated
215 		 * back to the beginning, handling (and ignoring) subprog call
216 		 */
217 		"r1 += r6;"
218 		"r0 = *(u32 *)(r1 + 0);"
219 		"exit;"
220 		:
221 		: __imm_ptr(vals)
222 		: __clobber_common, "r6"
223 	);
224 }
225 
/* Same as parent_callee_saved_reg_precise, but with a *global* subprog:
 * backtracking steps straight over the call insn (no frame1 log lines)
 * and still reaches 'r6 = 3'.
 */
226 SEC("?raw_tp")
227 __success __log_level(2)
228 __msg("7: (0f) r1 += r6")
229 __msg("mark_precise: frame0: last_idx 7 first_idx 0")
230 __msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
231 __msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
232 __msg("mark_precise: frame0: regs=r6 stack= before 4: (85) call pc+5")
233 __msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
234 __msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
235 __naked int parent_callee_saved_reg_precise_global(void)
236 {
237 	asm volatile (
238 		"r6 = 3;"
239 
240 		/* call subprog and ignore result; we need this call only to
241 		 * complicate jump history
242 		 */
243 		"r1 = 0;"
244 		"call global_identity_subprog;"
245 
246 		"r6 *= 4;"
247 		"r1 = %[vals];"
248 		/* here r6 is forced to be precise and has to be propagated
249 		 * back to the beginning, handling (and ignoring) subprog call
250 		 */
251 		"r1 += r6;"
252 		"r0 = *(u32 *)(r1 + 0);"
253 		"exit;"
254 		:
255 		: __imm_ptr(vals)
256 		: __clobber_common, "r6"
257 	);
258 }
259 
/* Verify that precision on callee-saved r6 propagates back across a
 * bpf_loop() call to 'r6 = 3', and that r6 (already marked by then) is
 * included alongside r1/r4 when precision is propagated through the
 * callback's exit on the second simulated path.
 */
260 SEC("?raw_tp")
261 __success __log_level(2)
262 /* First simulated path does not include callback body */
263 __msg("12: (0f) r1 += r6")
264 __msg("mark_precise: frame0: last_idx 12 first_idx 9")
265 __msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7")
266 __msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4")
267 __msg("mark_precise: frame0: regs=r6 stack= before 9: (85) call bpf_loop")
268 __msg("mark_precise: frame0: parent state regs=r6 stack=:")
269 __msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
270 __msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0")
271 __msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0")
272 __msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8")
273 __msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
274 __msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
275 /* State entering callback body popped from states stack */
276 __msg("from 9 to 15: frame1:")
277 __msg("15: frame1: R1=scalar() R2=0 R10=fp0 cb")
278 __msg("15: (b7) r0 = 0")
279 __msg("16: (95) exit")
280 __msg("returning from callee:")
281 __msg("to caller at 9:")
282 /* r1, r4 are always precise for bpf_loop(),
283  * r6 was marked before backtracking to callback body.
284  */
285 __msg("frame 0: propagating r1,r4,r6")
286 __msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
287 __msg("mark_precise: frame0: regs=r1,r4,r6 stack= before 16: (95) exit")
288 __msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
289 __msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop")
290 __msg("mark_precise: frame0: parent state regs= stack=:")
291 __msg("from 16 to 9: safe")
292 __naked int parent_callee_saved_reg_precise_with_callback(void)
293 {
294 	asm volatile (
295 		"r6 = 3;"
296 
297 		/* call subprog and ignore result; we need this call only to
298 		 * complicate jump history
299 		 */
300 		"r1 = 1;"			/* nr_loops */
301 		"r2 = %[callback_subprog];"	/* callback_fn */
302 		"r3 = 0;"			/* callback_ctx */
303 		"r4 = 0;"			/* flags */
304 		"call %[bpf_loop];"
305 
306 		"r6 *= 4;"
307 		"r1 = %[vals];"
308 		/* here r6 is forced to be precise and has to be propagated
309 		 * back to the beginning, handling (and ignoring) callback call
310 		 */
311 		"r1 += r6;"
312 		"r0 = *(u32 *)(r1 + 0);"
313 		"exit;"
314 		:
315 		: __imm_ptr(vals),
316 		  __imm_ptr(callback_subprog),
317 		  __imm(bpf_loop)
318 		: __clobber_common, "r6"
319 	);
320 }
321 
/* Verify that a precision mark on parent stack slot fp-8 (spilled r6) is
 * carried as a stack mask back through a static subprog's jump history
 * (frame1 lines contribute nothing) to the spill and then to 'r6 = 3'.
 */
322 SEC("?raw_tp")
323 __success __log_level(2)
324 __msg("9: (0f) r1 += r6")
325 __msg("mark_precise: frame0: last_idx 9 first_idx 6")
326 __msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
327 __msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
328 __msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
329 __msg("mark_precise: frame0: parent state regs= stack=-8:")
330 __msg("mark_precise: frame0: last_idx 13 first_idx 0")
331 __msg("mark_precise: frame0: regs= stack=-8 before 13: (95) exit")
332 __msg("mark_precise: frame1: regs= stack= before 12: (bf) r0 = r1")
333 __msg("mark_precise: frame1: regs= stack= before 5: (85) call pc+6")
334 __msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
335 __msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
336 __msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
337 __naked int parent_stack_slot_precise(void)
338 {
339 	asm volatile (
340 		/* spill reg */
341 		"r6 = 3;"
342 		"*(u64 *)(r10 - 8) = r6;"
343 
344 		/* call subprog and ignore result; we need this call only to
345 		 * complicate jump history
346 		 */
347 		"r1 = 0;"
348 		"call identity_subprog;"
349 
350 		/* restore reg from stack; in this case we'll be carrying
351 		 * stack mask when going back into subprog through jump
352 		 * history
353 		 */
354 		"r6 = *(u64 *)(r10 - 8);"
355 
356 		"r6 *= 4;"
357 		"r1 = %[vals];"
358 		/* here r6 is forced to be precise and has to be propagated
359 		 * back to the beginning, handling (and ignoring) subprog call
360 		 */
361 		"r1 += r6;"
362 		"r0 = *(u32 *)(r1 + 0);"
363 		"exit;"
364 		:
365 		: __imm_ptr(vals)
366 		: __clobber_common, "r6"
367 	);
368 }
369 
/* Same as parent_stack_slot_precise, but with a *global* subprog: the
 * fp-8 stack mask is carried straight over the call insn (no frame1 log
 * lines) back to the spill and then to 'r6 = 3'.
 */
370 SEC("?raw_tp")
371 __success __log_level(2)
372 __msg("9: (0f) r1 += r6")
373 __msg("mark_precise: frame0: last_idx 9 first_idx 0")
374 __msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
375 __msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
376 __msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
377 __msg("mark_precise: frame0: regs= stack=-8 before 5: (85) call pc+6")
378 __msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
379 __msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
380 __msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
381 __naked int parent_stack_slot_precise_global(void)
382 {
383 	asm volatile (
384 		/* spill reg */
385 		"r6 = 3;"
386 		"*(u64 *)(r10 - 8) = r6;"
387 
388 		/* call subprog and ignore result; we need this call only to
389 		 * complicate jump history
390 		 */
391 		"r1 = 0;"
392 		"call global_identity_subprog;"
393 
394 		/* restore reg from stack; in this case we'll be carrying
395 		 * stack mask when going back into subprog through jump
396 		 * history
397 		 */
398 		"r6 = *(u64 *)(r10 - 8);"
399 
400 		"r6 *= 4;"
401 		"r1 = %[vals];"
402 		/* here r6 is forced to be precise and has to be propagated
403 		 * back to the beginning, handling (and ignoring) subprog call
404 		 */
405 		"r1 += r6;"
406 		"r0 = *(u32 *)(r1 + 0);"
407 		"exit;"
408 		:
409 		: __imm_ptr(vals)
410 		: __clobber_common, "r6"
411 	);
412 }
413 
/* Verify that a precision mark on parent stack slot fp-8 propagates back
 * across a bpf_loop() call to the spill and 'r6 = 3', and that fp-8
 * (already marked by then) is propagated alongside r1/r4 through the
 * callback's exit on the second simulated path.
 */
414 SEC("?raw_tp")
415 __success __log_level(2)
416 /* First simulated path does not include callback body */
417 __msg("14: (0f) r1 += r6")
418 __msg("mark_precise: frame0: last_idx 14 first_idx 10")
419 __msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
420 __msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
421 __msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)")
422 __msg("mark_precise: frame0: regs= stack=-8 before 10: (85) call bpf_loop")
423 __msg("mark_precise: frame0: parent state regs= stack=-8:")
424 __msg("mark_precise: frame0: last_idx 9 first_idx 0 subseq_idx 10")
425 __msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0")
426 __msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0")
427 __msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8")
428 __msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6")
429 __msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6")
430 __msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
431 /* State entering callback body popped from states stack */
432 __msg("from 10 to 17: frame1:")
433 __msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
434 __msg("17: (b7) r0 = 0")
435 __msg("18: (95) exit")
436 __msg("returning from callee:")
437 __msg("to caller at 10:")
438 /* r1, r4 are always precise for bpf_loop(),
439  * fp-8 was marked before backtracking to callback body.
440  */
441 __msg("frame 0: propagating r1,r4,fp-8")
442 __msg("mark_precise: frame0: last_idx 10 first_idx 10 subseq_idx -1")
443 __msg("mark_precise: frame0: regs=r1,r4 stack=-8 before 18: (95) exit")
444 __msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
445 __msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
446 __msg("mark_precise: frame0: parent state regs= stack=:")
447 __msg("from 18 to 10: safe")
448 __naked int parent_stack_slot_precise_with_callback(void)
449 {
450 	asm volatile (
451 		/* spill reg */
452 		"r6 = 3;"
453 		"*(u64 *)(r10 - 8) = r6;"
454 
455 		/* ensure we have callback frame in jump history */
456 		"r1 = r6;"			/* nr_loops */
457 		"r2 = %[callback_subprog];"	/* callback_fn */
458 		"r3 = 0;"			/* callback_ctx */
459 		"r4 = 0;"			/* flags */
460 		"call %[bpf_loop];"
461 
462 		/* restore reg from stack; in this case we'll be carrying
463 		 * stack mask when going back into subprog through jump
464 		 * history
465 		 */
466 		"r6 = *(u64 *)(r10 - 8);"
467 
468 		"r6 *= 4;"
469 		"r1 = %[vals];"
470 		/* here r6 is forced to be precise and has to be propagated
471 		 * back to the beginning, handling (and ignoring) subprog call
472 		 */
473 		"r1 += r6;"
474 		"r0 = *(u32 *)(r1 + 0);"
475 		"exit;"
476 		:
477 		: __imm_ptr(vals),
478 		  __imm_ptr(callback_subprog),
479 		  __imm(bpf_loop)
480 		: __clobber_common, "r6"
481 	);
482 }
483 
/* Static subprog that indexes vals[] with its argument, so the verifier
 * must mark the incoming argument precise *inside* the subprog (frame1)
 * and propagate that mark back into the caller (frame0).
 */
484 __noinline __used
485 static __u64 subprog_with_precise_arg(__u64 x)
486 {
487 	return vals[x]; /* x is forced to be precise */
488 }
489 
/* Verify that a precision requirement originating inside a static
 * subprog (frame1, see subprog_with_precise_arg) propagates through the
 * call boundary to the caller's r1 and then to r6 in frame0.
 */
490 SEC("?raw_tp")
491 __success __log_level(2)
492 __msg("8: (0f) r2 += r1")
493 __msg("mark_precise: frame1: last_idx 8 first_idx 0")
494 __msg("mark_precise: frame1: regs=r1 stack= before 6: (18) r2 = ")
495 __msg("mark_precise: frame1: regs=r1 stack= before 5: (67) r1 <<= 2")
496 __msg("mark_precise: frame1: regs=r1 stack= before 2: (85) call pc+2")
497 __msg("mark_precise: frame0: regs=r1 stack= before 1: (bf) r1 = r6")
498 __msg("mark_precise: frame0: regs=r6 stack= before 0: (b7) r6 = 3")
499 __naked int subprog_arg_precise(void)
500 {
501 	asm volatile (
502 		"r6 = 3;"
503 		"r1 = r6;"
504 		/* subprog_with_precise_arg expects its argument to be
505 		 * precise, so r1->r6 will be marked precise from inside the
506 		 * subprog
507 		 */
508 		"call subprog_with_precise_arg;"
509 		"r0 += r6;"
510 		"exit;"
511 		:
512 		:
513 		: __clobber_common, "r6"
514 	);
515 }
516 
/* r1 is pointer to stack slot;
 * r2 is a register to spill into that slot
 * subprog also spills r2 into its own stack slot
 */
521 __naked __noinline __used
522 static __u64 subprog_spill_reg_precise(void)
523 {
524 	asm volatile (
525 		/* spill to parent stack */
526 		"*(u64 *)(r1 + 0) = r2;"
527 		/* spill to subprog stack (we use -16 offset to avoid
528 		 * accidental confusion with parent's -8 stack slot in
529 		 * verifier log output)
530 		 */
531 		"*(u64 *)(r10 - 16) = r2;"
532 		/* use both spills as return result to propagate precision everywhere */
533 		"r0 = *(u64 *)(r10 - 16);"
534 		"r2 = *(u64 *)(r1 + 0);"
535 		"r0 += r2;"
536 		"exit;"
537 	);
538 }
539 
/* Verify the current limitation: the subprog spills into the parent's
 * fp-8 through r1 (not r10), which precision backtracking can't follow,
 * so the verifier falls back to forcing all scalars precise.
 */
540 SEC("?raw_tp")
541 __success __log_level(2)
542 /* precision backtracking can't currently handle stack access not through r10,
543  * so we won't be able to mark stack slot fp-8 as precise, and so will
544  * fallback to forcing all as precise
545  */
546 __msg("mark_precise: frame0: falling back to forcing all scalars precise")
547 __naked int subprog_spill_into_parent_stack_slot_precise(void)
548 {
549 	asm volatile (
550 		"r6 = 1;"
551 
552 		/* pass pointer to stack slot and r6 to subprog;
553 		 * r6 will be marked precise and spilled into fp-8 slot, which
554 		 * also should be marked precise
555 		 */
556 		"r1 = r10;"
557 		"r1 += -8;"
558 		"r2 = r6;"
559 		"call subprog_spill_reg_precise;"
560 
561 		/* restore reg from stack; in this case we'll be carrying
562 		 * stack mask when going back into subprog through jump
563 		 * history
564 		 */
565 		"r7 = *(u64 *)(r10 - 8);"
566 
567 		"r7 *= 4;"
568 		"r1 = %[vals];"
569 		/* here r7 is forced to be precise and has to be propagated
570 		 * back to the beginning, handling subprog call and logic
571 		 */
572 		"r1 += r7;"
573 		"r0 = *(u32 *)(r1 + 0);"
574 		"exit;"
575 		:
576 		: __imm_ptr(vals)
577 		: __clobber_common, "r6", "r7"
578 	);
579 }
580 
/* Subprog whose no-op jump ("goto +0") creates a jump-history entry, so a
 * verifier state checkpoint is guaranteed there when BPF_F_TEST_STATE_FREQ
 * is in effect.
 */
581 __naked __noinline __used
582 static __u64 subprog_with_checkpoint(void)
583 {
584 	asm volatile (
585 		"r0 = 0;"
586 		/* guaranteed checkpoint if BPF_F_TEST_STATE_FREQ is used */
587 		"goto +0;"
588 		"exit;"
589 	);
590 }
591 
/* mandatory license declaration for BPF programs */
592 char _license[] SEC("license") = "GPL";
593