// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <errno.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include <../../../tools/include/linux/filter.h>

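/* vals lives in a named .data.vals section; libbpf turns such sections into
 * internal (global data) maps, which is why verifier log expectations below
 * refer to map=.data.vals. Indexing into the array with a variable offset is
 * what forces the verifier to mark the index (and its origin chain) precise.
 */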
int vals[] SEC(".data.vals") = {1, 2, 3, 4};

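/* Most helper subprogs below are __naked (no compiler-generated prologue or
 * epilogue), so their inline asm fully controls register state: per the BPF
 * calling convention, r1-r5 carry arguments and r0 carries the return value.
 */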
__naked __noinline __used
static unsigned long identity_subprog()
{
	/* the simplest *static* 64-bit identity function */
	asm volatile (
		"r0 = r1;"
		"exit;"
	);
}

__noinline __used
unsigned long global_identity_subprog(__u64 x)
{
	/* the simplest *global* 64-bit identity function */
	return x;
}

__naked __noinline __used
static unsigned long callback_subprog()
{
	/* the simplest callback function */
	asm volatile (
		"r0 = 0;"
		"exit;"
	);
}

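/* Note on the annotations used throughout: __msg() strings are substring
 * patterns that the selftests' test_loader framework matches, in order,
 * against the verifier log produced at __log_level(2); numbers like "7:"
 * are verifier instruction indices, not source lines. The '?' in
 * SEC("?raw_tp") tells libbpf not to auto-load the program, so the test
 * runner can load each one individually.
 */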
SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r0")
__msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1")
__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int subprog_result_precise(void)
{
	asm volatile (
		"r6 = 3;"
		/* pass r6 through r1 into subprog to get it back as r0;
		 * this whole chain will have to be marked as precise later
		 */
		"r1 = r6;"
		"call identity_subprog;"
		/* now use subprog's returned value (which is an
		 * r6 -> r1 -> r0 chain) as index into vals array, forcing
		 * all of that to be known precisely
		 */
		"r0 *= 4;"
		"r1 = %[vals];"
		/* here r0->r1->r6 chain is forced to be precise and has to be
		 * propagated back to the beginning, including through the
		 * subprog call
		 */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

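/* BPF_MOVSX64_REG(BPF_REG_0, BPF_REG_10, 8) comes from the filter.h header
 * included above and encodes a sign-extending move (off = 8 selects sign
 * extension from 8 bits), which the verifier log renders as "r0 = (s8)r10".
 * It is emitted as a raw .8byte constant via __imm_insn(), presumably to
 * avoid depending on assembler support for the movsx mnemonic.
 */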
__naked __noinline __used
static unsigned long fp_leaking_subprog()
{
	asm volatile (
		".8byte %[r0_eq_r10_cast_s8];"
		"exit;"
		:: __imm_insn(r0_eq_r10_cast_s8, BPF_MOVSX64_REG(BPF_REG_0, BPF_REG_10, 8))
	);
}

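/* Same fp leak as above, but the frame pointer is laundered through r1
 * first, so the sign-extending move's source register is not literally r10.
 */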
__naked __noinline __used
static unsigned long sneaky_fp_leaking_subprog()
{
	asm volatile (
		"r1 = r10;"
		".8byte %[r0_eq_r1_cast_s8];"
		"exit;"
		:: __imm_insn(r0_eq_r1_cast_s8, BPF_MOVSX64_REG(BPF_REG_0, BPF_REG_1, 8))
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("6: (0f) r1 += r0")
__msg("mark_precise: frame0: last_idx 6 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 3: (57) r0 &= 3")
__msg("mark_precise: frame0: regs=r0 stack= before 10: (95) exit")
__msg("mark_precise: frame1: regs=r0 stack= before 9: (bf) r0 = (s8)r10")
__msg("7: R0_w=scalar")
__naked int fp_precise_subprog_result(void)
{
	asm volatile (
		"call fp_leaking_subprog;"
		/* use subprog's returned value (which is derived from the
		 * r10=fp register) as index into vals array, forcing all of
		 * that to be known precisely
		 */
		"r0 &= 3;"
		"r0 *= 4;"
		"r1 = %[vals];"
		/* force precision marking */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("6: (0f) r1 += r0")
__msg("mark_precise: frame0: last_idx 6 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 3: (57) r0 &= 3")
__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = (s8)r1")
/* here r1 is marked precise even though it holds the frame pointer; that's
 * fine, because by the time we get out of the subprogram it has to be
 * derived from r10 anyway, at which point we'll break the precision chain
 */
__msg("mark_precise: frame1: regs=r1 stack= before 9: (bf) r1 = r10")
__msg("7: R0_w=scalar")
__naked int sneaky_fp_precise_subprog_result(void)
{
	asm volatile (
		"call sneaky_fp_leaking_subprog;"
		/* use subprog's returned value (which is derived from the
		 * r10=fp register) as index into vals array, forcing all of
		 * that to be known precisely
		 */
		"r0 &= 3;"
		"r0 *= 4;"
		"r1 = %[vals];"
		/* force precision marking */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r0")
__msg("mark_precise: frame0: last_idx 9 first_idx 0")
__msg("mark_precise: frame0: regs=r0 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r0 stack= before 7: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (a5) if r0 < 0x4 goto pc+1")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (85) call pc+7")
__naked int global_subprog_result_precise(void)
{
	asm volatile (
		"r6 = 3;"
		/* pass r6 through r1 into subprog to get it back as r0;
		 * given global_identity_subprog is global, precision won't
		 * propagate all the way back to r6
		 */
		"r1 = r6;"
		"call global_identity_subprog;"
		/* now use subprog's returned value (which is unknown now, so
		 * we need to clamp it) as index into vals array, forcing r0
		 * to be marked precise (with no effect on r6, though)
		 */
		"if r0 < %[vals_arr_sz] goto 1f;"
		"r0 = %[vals_arr_sz] - 1;"
	"1:"
		"r0 *= 4;"
		"r1 = %[vals];"
		/* here r0 is forced to be precise and has to be
		 * propagated back to the global subprog call, but it
		 * shouldn't go all the way to mark r6 as precise
		 */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_const(vals_arr_sz, ARRAY_SIZE(vals))
		: __clobber_common, "r6"
	);
}

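/* bpf_loop() callbacks must return 0 (continue iterating) or 1 (stop);
 * the verifier enforces this range when the callback exits, which is
 * exactly the check the failing test below depends on.
 */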
__naked __noinline __used
static unsigned long loop_callback_bad()
{
	/* bpf_loop() callback that can return values outside of [0, 1] range */
	asm volatile (
		"call %[bpf_get_prandom_u32];"
		"if r0 s> 1000 goto 1f;"
		"r0 = 0;"
	"1:"
		"goto +0;" /* checkpoint */
		/* bpf_loop() expects [0, 1] values, so the branch above that
		 * skips r0 = 0; should lead to a failure, but if the exit
		 * instruction doesn't enforce r0's precision, this callback
		 * will be verified successfully
		 */
		"exit;"
		:
		: __imm(bpf_get_prandom_u32)
		: __clobber_common
	);
}

SEC("?raw_tp")
__failure __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
/* check that the fallthrough code path marks r0 as precise */
__msg("mark_precise: frame1: regs=r0 stack= before 11: (b7) r0 = 0")
/* check that the branch code path does its own validation */
__msg("from 10 to 12: frame1: R0=scalar(smin=umin=1001")
/* check that the branch code path marks r0 as precise, before failing */
__msg("mark_precise: frame1: regs=r0 stack= before 9: (85) call bpf_get_prandom_u32#7")
__msg("At callback return the register R0 has smin=1001 should have been in [0, 1]")
__naked int callback_precise_return_fail(void)
{
	asm volatile (
		"r1 = 1;"			/* nr_loops */
		"r2 = %[loop_callback_bad];"	/* callback_fn */
		"r3 = 0;"			/* callback_ctx */
		"r4 = 0;"			/* flags */
		"call %[bpf_loop];"

		"r0 = 0;"
		"exit;"
		:
		: __imm_ptr(loop_callback_bad),
		  __imm(bpf_loop)
		: __clobber_common
	);
}

SEC("?raw_tp")
__success __log_level(2)
/* First simulated path does not include callback body;
 * r1 and r4 are always precise for bpf_loop() calls.
 */
__msg("9: (85) call bpf_loop#181")
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
__msg("mark_precise: frame0: parent state regs=r4 stack=:")
__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
__msg("mark_precise: frame0: regs=r4 stack= before 8: (b7) r4 = 0")
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
__msg("mark_precise: frame0: parent state regs=r1 stack=:")
__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
__msg("mark_precise: frame0: regs=r1 stack= before 8: (b7) r4 = 0")
__msg("mark_precise: frame0: regs=r1 stack= before 7: (b7) r3 = 0")
__msg("mark_precise: frame0: regs=r1 stack= before 6: (bf) r2 = r8")
__msg("mark_precise: frame0: regs=r1 stack= before 5: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
/* r6 precision propagation */
__msg("14: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 14 first_idx 9")
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4")
__msg("mark_precise: frame0: regs=r0,r6 stack= before 10: (bf) r6 = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 9: (85) call bpf_loop")
/* State entering callback body popped from states stack */
__msg("from 9 to 17: frame1:")
__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
__msg("17: (b7) r0 = 0")
__msg("18: (95) exit")
__msg("returning from callee:")
__msg("to caller at 9:")
__msg("frame 0: propagating r1,r4")
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
__msg("mark_precise: frame0: regs=r1,r4 stack= before 18: (95) exit")
__msg("from 18 to 9: safe")
__naked int callback_result_precise(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and use result; r0's precision shouldn't
		 * propagate back to callback_subprog
		 */
		"r1 = r6;"			/* nr_loops */
		"r2 = %[callback_subprog];"	/* callback_fn */
		"r3 = 0;"			/* callback_ctx */
		"r4 = 0;"			/* flags */
		"call %[bpf_loop];"

		"r6 = r0;"
		"if r6 > 3 goto 1f;"
		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the bpf_loop() call, but not beyond
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
	"1:"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_ptr(callback_subprog),
		  __imm(bpf_loop)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 7 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 10: (bf) r0 = r1")
__msg("mark_precise: frame1: regs= stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_callee_saved_reg_precise(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call identity_subprog;"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

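/* Same as above, but with a global subprog. Note in the expected log that
 * backtracking steps directly from insn 5 to the call at insn 4 without
 * descending into frame1: global functions are verified independently of
 * their callers, and callee-saved r6 is known to survive the call.
 */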
SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 7 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_callee_saved_reg_precise_global(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call global_identity_subprog;"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
/* First simulated path does not include callback body */
__msg("12: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 12 first_idx 9")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 9: (85) call bpf_loop")
__msg("mark_precise: frame0: parent state regs=r6 stack=:")
__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
/* State entering callback body popped from states stack */
__msg("from 9 to 15: frame1:")
__msg("15: frame1: R1=scalar() R2=0 R10=fp0 cb")
__msg("15: (b7) r0 = 0")
__msg("16: (95) exit")
__msg("returning from callee:")
__msg("to caller at 9:")
/* r1, r4 are always precise for bpf_loop();
 * r6 was marked before backtracking to callback body.
 */
__msg("frame 0: propagating r1,r4,r6")
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
__msg("mark_precise: frame0: regs=r1,r4,r6 stack= before 16: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop")
__msg("mark_precise: frame0: parent state regs= stack=:")
__msg("from 16 to 9: safe")
__naked int parent_callee_saved_reg_precise_with_callback(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 1;"			/* nr_loops */
		"r2 = %[callback_subprog];"	/* callback_fn */
		"r3 = 0;"			/* callback_ctx */
		"r4 = 0;"			/* flags */
		"call %[bpf_loop];"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) callback call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_ptr(callback_subprog),
		  __imm(bpf_loop)
		: __clobber_common, "r6"
	);
}

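/* In the tests below the precise value travels through a stack spill: once
 * r6 is reloaded from fp-8, backtracking tracks the slot via the stack mask
 * (stack=-8 in the log) instead of a register mask, until it reaches the
 * original store.
 */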
SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 9 first_idx 6")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: parent state regs= stack=-8:")
__msg("mark_precise: frame0: last_idx 13 first_idx 0")
__msg("mark_precise: frame0: regs= stack=-8 before 13: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 12: (bf) r0 = r1")
__msg("mark_precise: frame1: regs= stack= before 5: (85) call pc+6")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_stack_slot_precise(void)
{
	asm volatile (
		/* spill reg */
		"r6 = 3;"
		"*(u64 *)(r10 - 8) = r6;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call identity_subprog;"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r6 = *(u64 *)(r10 - 8);"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 9 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (85) call pc+6")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_stack_slot_precise_global(void)
{
	asm volatile (
		/* spill reg */
		"r6 = 3;"
		"*(u64 *)(r10 - 8) = r6;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call global_identity_subprog;"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r6 = *(u64 *)(r10 - 8);"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
/* First simulated path does not include callback body */
__msg("14: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 14 first_idx 10")
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 10: (85) call bpf_loop")
__msg("mark_precise: frame0: parent state regs= stack=-8:")
__msg("mark_precise: frame0: last_idx 9 first_idx 0 subseq_idx 10")
__msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8")
__msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
/* State entering callback body popped from states stack */
__msg("from 10 to 17: frame1:")
__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
__msg("17: (b7) r0 = 0")
__msg("18: (95) exit")
__msg("returning from callee:")
__msg("to caller at 10:")
/* r1, r4 are always precise for bpf_loop();
 * fp-8 was marked before backtracking to callback body.
 */
__msg("frame 0: propagating r1,r4,fp-8")
__msg("mark_precise: frame0: last_idx 10 first_idx 10 subseq_idx -1")
__msg("mark_precise: frame0: regs=r1,r4 stack=-8 before 18: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
__msg("mark_precise: frame0: parent state regs= stack=:")
__msg("from 18 to 10: safe")
__naked int parent_stack_slot_precise_with_callback(void)
{
	asm volatile (
		/* spill reg */
		"r6 = 3;"
		"*(u64 *)(r10 - 8) = r6;"

		/* ensure we have callback frame in jump history */
		"r1 = r6;"			/* nr_loops */
		"r2 = %[callback_subprog];"	/* callback_fn */
		"r3 = 0;"			/* callback_ctx */
		"r4 = 0;"			/* flags */
		"call %[bpf_loop];"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r6 = *(u64 *)(r10 - 8);"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_ptr(callback_subprog),
		  __imm(bpf_loop)
		: __clobber_common, "r6"
	);
}

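/* A static subprog can force precision of its own argument: vals[x] is a
 * variable-offset access into the global-data map, so x needs precise
 * bounds, and the mark propagates from frame1 back into the caller's r6.
 */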
__noinline __used
static __u64 subprog_with_precise_arg(__u64 x)
{
	return vals[x]; /* x is forced to be precise */
}

SEC("?raw_tp")
__success __log_level(2)
__msg("8: (0f) r2 += r1")
__msg("mark_precise: frame1: last_idx 8 first_idx 0")
__msg("mark_precise: frame1: regs=r1 stack= before 6: (18) r2 = ")
__msg("mark_precise: frame1: regs=r1 stack= before 5: (67) r1 <<= 2")
__msg("mark_precise: frame1: regs=r1 stack= before 2: (85) call pc+2")
__msg("mark_precise: frame0: regs=r1 stack= before 1: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 0: (b7) r6 = 3")
__naked int subprog_arg_precise(void)
{
	asm volatile (
		"r6 = 3;"
		"r1 = r6;"
		/* subprog_with_precise_arg expects its argument to be
		 * precise, so r1->r6 will be marked precise from inside the
		 * subprog
		 */
		"call subprog_with_precise_arg;"
		"r0 += r6;"
		"exit;"
		:
		:
		: __clobber_common, "r6"
	);
}

/* r1 is a pointer to a stack slot; r2 is a register to spill into that
 * slot; the subprog also spills r2 into its own stack slot
 */
__naked __noinline __used
static __u64 subprog_spill_reg_precise(void)
{
	asm volatile (
		/* spill to parent stack */
		"*(u64 *)(r1 + 0) = r2;"
		/* spill to subprog stack (we use -16 offset to avoid
		 * accidental confusion with parent's -8 stack slot in
		 * verifier log output)
		 */
		"*(u64 *)(r10 - 16) = r2;"
		/* use both spills as return result to propagate precision everywhere */
		"r0 = *(u64 *)(r10 - 16);"
		"r2 = *(u64 *)(r1 + 0);"
		"r0 += r2;"
		"exit;"
	);
}

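/* The test below checks that the precise mark lands on the *parent's* stack
 * slot even though the store happens in frame1 through the r1 pointer; note
 * fp-8_rw=P1 in the expected parent state line.
 */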
SEC("?raw_tp")
__success __log_level(2)
__msg("10: (0f) r1 += r7")
__msg("mark_precise: frame0: last_idx 10 first_idx 7 subseq_idx -1")
__msg("mark_precise: frame0: regs=r7 stack= before 9: (bf) r1 = r8")
__msg("mark_precise: frame0: regs=r7 stack= before 8: (27) r7 *= 4")
__msg("mark_precise: frame0: regs=r7 stack= before 7: (79) r7 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: parent state regs= stack=-8:  R0_w=2 R6_w=1 R8_rw=map_value(map=.data.vals,ks=4,vs=16) R10=fp0 fp-8_rw=P1")
__msg("mark_precise: frame0: last_idx 18 first_idx 0 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 17: (0f) r0 += r2")
__msg("mark_precise: frame1: regs= stack= before 16: (79) r2 = *(u64 *)(r1 +0)")
__msg("mark_precise: frame1: regs= stack= before 15: (79) r0 = *(u64 *)(r10 -16)")
__msg("mark_precise: frame1: regs= stack= before 14: (7b) *(u64 *)(r10 -16) = r2")
__msg("mark_precise: frame1: regs= stack= before 13: (7b) *(u64 *)(r1 +0) = r2")
__msg("mark_precise: frame1: regs=r2 stack= before 6: (85) call pc+6")
__msg("mark_precise: frame0: regs=r2 stack= before 5: (bf) r2 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (07) r1 += -8")
__msg("mark_precise: frame0: regs=r6 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 1")
__naked int subprog_spill_into_parent_stack_slot_precise(void)
{
	asm volatile (
		"r6 = 1;"

		/* pass pointer to stack slot and r6 to subprog;
		 * r6 will be marked precise and spilled into fp-8 slot, which
		 * also should be marked precise
		 */
		"r1 = r10;"
		"r1 += -8;"
		"r2 = r6;"
		"call subprog_spill_reg_precise;"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r7 = *(u64 *)(r10 - 8);"

		"r7 *= 4;"
		"r1 = %[vals];"
		/* here r7 is forced to be precise and has to be propagated
		 * back to the beginning, handling the subprog call and its
		 * spill/fill logic
		 */
		"r1 += r7;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6", "r7"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("17: (0f) r1 += r0")
__msg("mark_precise: frame0: last_idx 17 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 16: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r0 stack= before 15: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 14: (79) r0 = *(u64 *)(r10 -16)")
__msg("mark_precise: frame0: regs= stack=-16 before 13: (7b) *(u64 *)(r7 -8) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 12: (79) r0 = *(u64 *)(r8 +16)")
__msg("mark_precise: frame0: regs= stack=-16 before 11: (7b) *(u64 *)(r8 +16) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 10: (79) r0 = *(u64 *)(r7 -8)")
__msg("mark_precise: frame0: regs= stack=-16 before 9: (7b) *(u64 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 8: (07) r8 += -32")
__msg("mark_precise: frame0: regs=r0 stack= before 7: (bf) r8 = r10")
__msg("mark_precise: frame0: regs=r0 stack= before 6: (07) r7 += -8")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r7 = r10")
__msg("mark_precise: frame0: regs=r0 stack= before 21: (95) exit")
__msg("mark_precise: frame1: regs=r0 stack= before 20: (bf) r0 = r1")
__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+15")
__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 1")
__naked int stack_slot_aliases_precision(void)
{
	asm volatile (
		"r6 = 1;"
		/* pass r6 through r1 into subprog to get it back as r0;
		 * this whole chain will have to be marked as precise later
		 */
		"r1 = r6;"
		"call identity_subprog;"
		/* let's set up two registers that are aliased to r10 */
		"r7 = r10;"
		"r7 += -8;"			/* r7 = r10 - 8 */
		"r8 = r10;"
		"r8 += -32;"			/* r8 = r10 - 32 */
		/* now spill subprog's return value (an r6 -> r1 -> r0 chain)
		 * a few times through different stack pointer regs, making
		 * sure to use r10, r7, and r8 in both LDX and STX insns, and
		 * *importantly* also using a combination of const var_off and
		 * insn->off to validate that we record the final stack slot
		 * correctly, instead of relying on just insn->off derivation,
		 * which is only valid for r10-based stack offsets
		 */
		"*(u64 *)(r10 - 16) = r0;"
		"r0 = *(u64 *)(r7 - 8);"	/* r7 - 8 == r10 - 16 */
		"*(u64 *)(r8 + 16) = r0;"	/* r8 + 16 == r10 - 16 */
		"r0 = *(u64 *)(r8 + 16);"
		"*(u64 *)(r7 - 8) = r0;"
		"r0 = *(u64 *)(r10 - 16);"
		/* get ready to use r0 as an index into array to force precision */
		"r0 *= 4;"
		"r1 = %[vals];"
		/* here r0->r1->r6 chain is forced to be precise and has to be
		 * propagated back to the beginning, including through the
		 * subprog call and all the stack spills and loads
		 */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

char _license[] SEC("license") = "GPL";